Skip to content

model.py

Code adapted from the Mathis Lab (MIT License, Copyright (c) 2022 Mackenzie Mathis). DataJoint schema for DeepLabCut 2.x; supports 2D and 3D DLC via triangulation.

activate(model_schema_name, *, create_schema=True, create_tables=True, linking_module=None)

Activate this schema.

Parameters:

Name Type Description Default
model_schema_name str

schema name on the database server

required
create_schema bool

when True (default), create schema in the database if it does not yet exist.

True
create_tables bool

when True (default), create schema tables in the database if they do not yet exist.

True
linking_module str

a module (or name) containing the required dependencies.

None

Dependencies: Upstream tables: Session: A parent table to VideoRecording, identifying a recording session. Equipment: A parent table to VideoRecording, identifying a recording device. Functions: get_dlc_root_data_dir(): Returns absolute path for root data director(y/ies) with all behavioral recordings, as (list of) string(s). get_dlc_processed_data_dir(): Optional. Returns absolute path for processed data. Defaults to session video subfolder.

Source code in element_deeplabcut/model.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
def activate(
    model_schema_name: str,
    *,
    create_schema: bool = True,
    create_tables: bool = True,
    linking_module: str = None,
):
    """Activate this schema.

    Args:
        model_schema_name (str): Schema name on the database server.
        create_schema (bool): When True (default), create the schema on the
            database server if it does not yet exist.
        create_tables (bool): When True (default), create schema tables in the
            database if they do not yet exist.
        linking_module (str): A module (or module name) containing the required
            dependencies.

    Raises:
        AssertionError: If ``linking_module`` is neither a module nor the name of
            an importable module, or does not define ``get_dlc_root_data_dir``.

    Dependencies:
    Upstream tables:
        Session: A parent table to VideoRecording, identifying a recording session.
        Equipment: A parent table to VideoRecording, identifying a recording device.
    Functions:
        get_dlc_root_data_dir(): Returns absolute path for root data director(y/ies)
                                 with all behavioral recordings, as (list of) string(s).
        get_dlc_processed_data_dir(): Optional. Returns absolute path for processed
                                      data. Defaults to session video subfolder.
    """

    if isinstance(linking_module, str):
        linking_module = importlib.import_module(linking_module)
    # Fixed message: the parameter is named 'linking_module', not 'dependency'.
    assert inspect.ismodule(
        linking_module
    ), "The argument 'linking_module' must be a module's name or a module"
    assert hasattr(
        linking_module, "get_dlc_root_data_dir"
    ), "The linking module must specify a lookup function for a root data directory"

    # Stash the module globally so schema-level helpers (e.g.
    # get_dlc_root_data_dir below) can delegate to it after activation.
    global _linking_module
    _linking_module = linking_module

    # activate
    schema.activate(
        model_schema_name,
        create_schema=create_schema,
        create_tables=create_tables,
        add_objects=_linking_module.__dict__,
    )

get_dlc_root_data_dir()

Pulls relevant func from parent namespace to specify root data dir(s).

It is recommended that all paths in DataJoint Elements stored as relative paths, with respect to some user-configured "root" director(y/ies). The root(s) may vary between data modalities and user machines. Returns a full path string or list of strings for possible root data directories.

Source code in element_deeplabcut/model.py
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
def get_dlc_root_data_dir() -> list:
    """Pulls relevant func from parent namespace to specify root data dir(s).

    It is recommended that all paths in DataJoint Elements stored as relative
    paths, with respect to some user-configured "root" director(y/ies). The
    root(s) may vary between data modalities and user machines. Returns a full path
    string or list of strings for possible root data directories.
    """
    root_directories = _linking_module.get_dlc_root_data_dir()
    if isinstance(root_directories, (str, Path)):
        root_directories = [root_directories]

    if hasattr(_linking_module, "get_dlc_processed_data_dir"):
        # Call the hook once (the original called it twice) and guard against a
        # hook that is defined but returns None — appending None would corrupt
        # downstream root-directory searches.
        processed_dir = _linking_module.get_dlc_processed_data_dir()
        if processed_dir is not None and processed_dir not in root_directories:
            root_directories.append(processed_dir)

    return root_directories

get_dlc_processed_data_dir()

Pulls relevant func from parent namespace. Defaults to DLC's project /videos/.

Method in parent namespace should provide a string to a directory where DLC output files will be stored. If unspecified, output files will be stored in the session directory 'videos' folder, per DeepLabCut default.

Source code in element_deeplabcut/model.py
 99
100
101
102
103
104
105
106
107
108
109
def get_dlc_processed_data_dir() -> Optional[str]:
    """Pulls relevant func from parent namespace. Defaults to DLC's project /videos/.

    The linking module may supply a directory for DLC output files. When it
    does not, None is returned and output defaults to the session directory's
    'videos' folder, per DeepLabCut convention.
    """
    # getattr with a None fallback replaces the hasattr/if-else branch.
    processed_dir_func = getattr(_linking_module, "get_dlc_processed_data_dir", None)
    return processed_dir_func() if processed_dir_func is not None else None

VideoRecording

Bases: Manual

Set of video recordings for DLC inferences.

Attributes:

Name Type Description
Session foreign key

Session primary key.

recording_id int

Unique recording ID.

Device foreign key

Device table primary key, used for default output directory path information.

Source code in element_deeplabcut/model.py
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
@schema
class VideoRecording(dj.Manual):
    """Set of video recordings for DLC inferences.

    Attributes:
        Session (foreign key): Session primary key.
        recording_id (int): Unique recording ID.
        Device (foreign key): Device table primary key, used for default output
            directory path information.
    """

    # Session and Device are resolved from the linking module passed to activate().
    definition = """
    -> Session
    recording_id: int
    ---
    -> Device
    """

    class File(dj.Part):
        """File IDs and paths associated with a given recording_id

        Attributes:
            VideoRecording (foreign key): Video recording primary key.
            file_path ( varchar(255) ): file path of video, relative to root data dir.
        """

        # One recording may span multiple video files (hence a part table
        # keyed by file_id); paths are stored relative to a root data dir.
        definition = """
        -> master
        file_id: int
        ---
        file_path: varchar(255)  # filepath of video, relative to root data directory
        """

File

Bases: Part

File IDs and paths associated with a given recording_id

Attributes:

Name Type Description
VideoRecording foreign key

Video recording primary key.

file_path varchar(255)

file path of video, relative to root data dir.

Source code in element_deeplabcut/model.py
133
134
135
136
137
138
139
140
141
142
143
144
145
146
class File(dj.Part):
    """File IDs and paths associated with a given recording_id

    Attributes:
        VideoRecording (foreign key): Video recording primary key.
        file_path ( varchar(255) ): file path of video, relative to root data dir.
    """

    # One recording may comprise several video files; each gets its own file_id.
    # Paths are stored relative to a configured root data directory.
    definition = """
    -> master
    file_id: int
    ---
    file_path: varchar(255)  # filepath of video, relative to root data directory
    """

RecordingInfo

Bases: Imported

Automated table with video file metadata.

Attributes:

Name Type Description
VideoRecording foreign key

Video recording key.

px_height smallint

Height in pixels.

px_width smallint

Width in pixels.

nframes int

Number of frames.

fps int

Optional. Frames per second, Hz.

recording_datetime datetime

Optional. Datetime for the start of recording.

recording_duration float

video duration (s) from nframes / fps.

Source code in element_deeplabcut/model.py
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
@schema
class RecordingInfo(dj.Imported):
    """Automated table with video file metadata.

    Attributes:
        VideoRecording (foreign key): Video recording key.
        px_height (smallint): Height in pixels.
        px_width (smallint): Width in pixels.
        nframes (int): Number of frames.
        fps (int): Optional. Frames per second, Hz.
        recording_datetime (datetime): Optional. Datetime for the start of recording.
        recording_duration (float): video duration (s) from nframes / fps."""

    definition = """
    -> VideoRecording
    ---
    px_height                 : smallint  # height in pixels
    px_width                  : smallint  # width in pixels
    nframes                   : int  # number of frames 
    fps = NULL                : int       # (Hz) frames per second
    recording_datetime = NULL : datetime  # Datetime for the start of the recording
    recording_duration        : float     # video duration (s) from nframes / fps
    """

    @property
    def key_source(self):
        """Defines order of keys for make function when called via `populate()`"""
        # Restrict to recordings that have at least one registered file, so
        # make() is never invoked on an empty file list.
        return VideoRecording & VideoRecording.File

    def make(self, key):
        """Populates table with video metadata using CV2."""
        file_paths = (VideoRecording.File & key).fetch("file_path")

        # Frame count accumulates across files; height/width/fps must agree.
        nframes = 0
        px_height, px_width, fps = None, None, None

        for file_path in file_paths:
            # Stored paths are relative to a root dir; resolve to absolute.
            file_path = (find_full_path(get_dlc_root_data_dir(), file_path)).as_posix()

            cap = cv2.VideoCapture(file_path)
            info = (
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FPS)),
            )
            # All files within one recording must share identical geometry/fps.
            if px_height is not None:
                assert (px_height, px_width, fps) == info
            px_height, px_width, fps = info
            nframes += int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            cap.release()

        # NOTE(review): an fps of 0 (e.g., a file OpenCV cannot read) would
        # raise ZeroDivisionError below — confirm inputs are always readable.
        self.insert1(
            {
                **key,
                "px_height": px_height,
                "px_width": px_width,
                "nframes": nframes,
                "fps": fps,
                "recording_duration": nframes / fps,
            }
        )

key_source property

Defines order of keys for make function when called via populate()

make(key)

Populates table with video metadata using CV2.

Source code in element_deeplabcut/model.py
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
def make(self, key):
    """Populate the table with video metadata read via OpenCV (CV2)."""
    video_paths = (VideoRecording.File & key).fetch("file_path")

    total_frames = 0
    height, width, frame_rate = None, None, None

    for relative_path in video_paths:
        # Stored paths are relative; resolve against the configured roots.
        full_path = find_full_path(get_dlc_root_data_dir(), relative_path).as_posix()

        capture = cv2.VideoCapture(full_path)
        current_info = (
            int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)),
            int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(capture.get(cv2.CAP_PROP_FPS)),
        )
        # Every file in the recording must share the same geometry and fps.
        if height is not None:
            assert (height, width, frame_rate) == current_info
        height, width, frame_rate = current_info
        total_frames += int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        capture.release()

    self.insert1(
        {
            **key,
            "px_height": height,
            "px_width": width,
            "nframes": total_frames,
            "fps": frame_rate,
            "recording_duration": total_frames / frame_rate,
        }
    )

BodyPart

Bases: Lookup

Body parts tracked by DeepLabCut models

Attributes:

Name Type Description
body_part varchar(32)

Body part short name.

body_part_description varchar(1000),optional

Full description

Source code in element_deeplabcut/model.py
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
@schema
class BodyPart(dj.Lookup):
    """Body parts tracked by DeepLabCut models

    Attributes:
        body_part ( varchar(32) ): Body part short name.
        body_part_description ( varchar(1000),optional ): Full description

    """

    definition = """
    body_part                : varchar(32)
    ---
    body_part_description='' : varchar(1000)
    """

    @classmethod
    def extract_new_body_parts(cls, dlc_config: dict, verbose: bool = True):
        """Returns list of body parts present in dlc config, but not BodyPart table.

        Args:
            dlc_config ( varchar(255) ): Path to a config.y*ml, or a dict of its
                contents.
            verbose (bool): Default True. Print both existing and new items to console.

        Returns:
            numpy.ndarray: Sorted array of body-part names not yet in this table.
        """
        if not isinstance(dlc_config, dict):
            # Resolve the path against configured roots and load the yaml.
            dlc_config_fp = find_full_path(get_dlc_root_data_dir(), Path(dlc_config))
            assert dlc_config_fp.exists() and dlc_config_fp.suffix in (
                ".yml",
                ".yaml",
            ), f"dlc_config is neither dict nor filepath\n Check: {dlc_config_fp}"
            if dlc_config_fp.suffix in (".yml", ".yaml"):
                yaml = YAML(typ="safe", pure=True)
                with open(dlc_config_fp, "rb") as f:
                    dlc_config = yaml.load(f)
        # -- Check and insert new BodyPart --
        assert "bodyparts" in dlc_config, f"Found no bodyparts section in {dlc_config}"
        tracked_body_parts = cls.fetch("body_part")
        new_body_parts = np.setdiff1d(dlc_config["bodyparts"], tracked_body_parts)
        if verbose:  # Added to silence duplicate prompt during `insert_new_model`
            print(f"Existing body parts: {tracked_body_parts}")
            print(f"New body parts: {new_body_parts}")
        return new_body_parts

    @classmethod
    def insert_from_config(
        cls, dlc_config: dict, descriptions: list = None, prompt=True
    ):
        """Insert all body parts from a config file.

        Args:
            dlc_config ( varchar(255) ): Path to a config.y*ml.
            descriptions (list): Optional. List of strings describing new body parts.
            prompt (bool): Optional, default True. Prompt for confirmation before insert.
        """

        # handle dlc_config being a yaml file
        new_body_parts = cls.extract_new_body_parts(dlc_config, verbose=False)
        # BUG FIX: np.setdiff1d always returns an array (never None), so the
        # previous `is not None` check also passed for an empty result,
        # prompting "Insert 0 new body part(s)?" and issuing a no-op insert.
        # Check the array's size instead (arrays are ambiguous as booleans),
        # matching the `.size > 0` check used in Model.insert_new_model.
        if new_body_parts.size > 0:
            if descriptions:
                assert len(descriptions) == len(new_body_parts), (
                    "Descriptions list does not match "
                    + " the number of new_body_parts"
                )
                print(f"New descriptions: {descriptions}")
            if descriptions is None:
                descriptions = [""] * len(new_body_parts)

            if (
                prompt
                and dj.utils.user_choice(
                    f"Insert {len(new_body_parts)} new body " + "part(s)?"
                )
                != "yes"
            ):
                print("Canceled insert.")
                return
            cls.insert(
                [
                    {"body_part": b, "body_part_description": d}
                    for b, d in zip(new_body_parts, descriptions)
                ]
            )

extract_new_body_parts(dlc_config, verbose=True) classmethod

Returns list of body parts present in dlc config, but not BodyPart table.

Parameters:

Name Type Description Default
dlc_config varchar(255)

Path to a config.y*ml.

required
verbose bool

Default True. Print both existing and new items to console.

True
Source code in element_deeplabcut/model.py
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@classmethod
def extract_new_body_parts(cls, dlc_config: dict, verbose: bool = True):
    """Identify body parts listed in a DLC config but absent from BodyPart.

    Args:
        dlc_config ( varchar(255) ): Path to a config.y*ml, or a config dict.
        verbose (bool): Default True. Print both existing and new items to console.
    """
    if not isinstance(dlc_config, dict):
        # Not a dict: treat it as a path, resolve it, and parse the yaml.
        dlc_config_fp = find_full_path(get_dlc_root_data_dir(), Path(dlc_config))
        is_yaml = dlc_config_fp.suffix in (".yml", ".yaml")
        assert (
            dlc_config_fp.exists() and is_yaml
        ), f"dlc_config is neither dict nor filepath\n Check: {dlc_config_fp}"
        if is_yaml:
            parser = YAML(typ="safe", pure=True)
            with open(dlc_config_fp, "rb") as stream:
                dlc_config = parser.load(stream)
    assert "bodyparts" in dlc_config, f"Found no bodyparts section in {dlc_config}"
    # Diff the config's body parts against those already registered.
    tracked_body_parts = cls.fetch("body_part")
    new_body_parts = np.setdiff1d(dlc_config["bodyparts"], tracked_body_parts)
    if verbose:  # silenced during `insert_new_model` to avoid duplicate output
        print(f"Existing body parts: {tracked_body_parts}")
        print(f"New body parts: {new_body_parts}")
    return new_body_parts

insert_from_config(dlc_config, descriptions=None, prompt=True) classmethod

Insert all body parts from a config file.

Parameters:

Name Type Description Default
dlc_config varchar(255)

Path to a config.y*ml.

required
descriptions list

Optional. List of strings describing new body parts.

None
prompt bool

Optional, default True. Prompt for confirmation before insert.

True
Source code in element_deeplabcut/model.py
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
@classmethod
def insert_from_config(
    cls, dlc_config: dict, descriptions: list = None, prompt=True
):
    """Insert all body parts from a config file.

    Args:
        dlc_config ( varchar(255) ): Path to a config.y*ml.
        descriptions (list): Optional. List of strings describing new body parts.
        prompt (bool): Optional, default True. Prompt for confirmation before insert.
    """

    # handle dlc_config being a yaml file
    new_body_parts = cls.extract_new_body_parts(dlc_config, verbose=False)
    # BUG FIX: np.setdiff1d never returns None — an empty result is an empty
    # array, which passed the previous `is not None` check and triggered a
    # pointless "Insert 0 new body part(s)?" prompt plus a no-op insert.
    # Test the array's size instead, as Model.insert_new_model already does.
    if new_body_parts.size > 0:
        if descriptions:
            assert len(descriptions) == len(new_body_parts), (
                "Descriptions list does not match "
                + " the number of new_body_parts"
            )
            print(f"New descriptions: {descriptions}")
        if descriptions is None:
            descriptions = [""] * len(new_body_parts)

        if (
            prompt
            and dj.utils.user_choice(
                f"Insert {len(new_body_parts)} new body " + "part(s)?"
            )
            != "yes"
        ):
            print("Canceled insert.")
            return
        cls.insert(
            [
                {"body_part": b, "body_part_description": d}
                for b, d in zip(new_body_parts, descriptions)
            ]
        )

Model

Bases: Manual

DeepLabCut Models applied to generate pose estimations.

Attributes:

Name Type Description
model_name varchar(64)

User-friendly model name.

task varchar(32)

Task in the config yaml.

date varchar(16)

Date in the config yaml.

iteration int

Iteration/version of this model.

snapshotindex int

Which snapshot for prediction (if -1, latest).

shuffle int

Which shuffle of the training dataset.

trainingsetindex int

Which training set fraction to generate model.

scorer varchar(64)

Scorer/network name - DLC's GetScorerName().

config_template longblob

Dictionary of the config for analyze_videos().

project_path varchar(255)

DLC's project_path in config relative to root.

model_prefix varchar(32)

Optional. Prefix for model files.

model_description varchar(300)

Optional. User-entered description.

TrainingParamSet foreign key

Optional. Training parameters primary key.

Note

Models are uniquely identified by the union of task, date, iteration, shuffle, snapshotindex, and trainingsetindex.

Source code in element_deeplabcut/model.py
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
@schema
class Model(dj.Manual):
    """DeepLabCut Models applied to generate pose estimations.

    Attributes:
        model_name ( varchar(64) ): User-friendly model name.
        task ( varchar(32) ): Task in the config yaml.
        date ( varchar(16) ): Date in the config yaml.
        iteration (int): Iteration/version of this model.
        snapshotindex (int): Which snapshot for prediction (if -1, latest).
        shuffle (int): Which shuffle of the training dataset.
        trainingsetindex (int): Which training set fraction to generate model.
        scorer ( varchar(64) ): Scorer/network name - DLC's GetScorerName().
        config_template (longblob): Dictionary of the config for analyze_videos().
        project_path ( varchar(255) ): DLC's project_path in config relative to root.
        model_prefix ( varchar(32) ): Optional. Prefix for model files.
        model_description ( varchar(300) ): Optional. User-entered description.
        TrainingParamSet (foreign key): Optional. Training parameters primary key.

    Note:
        Models are uniquely identified by the union of task, date, iteration, shuffle,
        snapshotindex, and trainingsetindex.
    """

    # The `unique index` line enforces the identity noted in the docstring.
    # NOTE(review): the `shuffle` comment below ("Shuffle (1) or not (0)")
    # disagrees with the docstring ("which shuffle of the training dataset")
    # — confirm which semantics are intended.
    definition = """
    model_name           : varchar(64)  # User-friendly model name
    ---
    task                 : varchar(32)  # Task in the config yaml
    date                 : varchar(16)  # Date in the config yaml
    iteration            : int          # Iteration/version of this model
    snapshotindex        : int          # which snapshot for prediction (if -1, latest)
    shuffle              : int          # Shuffle (1) or not (0)
    trainingsetindex     : int          # Index of training fraction list in config.yaml
    unique index (task, date, iteration, shuffle, snapshotindex, trainingsetindex)
    scorer               : varchar(64)  # Scorer/network name - DLC's GetScorerName()
    config_template      : longblob     # Dictionary of the config for analyze_videos()
    project_path         : varchar(255) # DLC's project_path in config relative to root
    model_prefix=''      : varchar(32)
    model_description='' : varchar(300)
    -> [nullable] train.TrainingParamSet
    """
    # project_path is the only item required downstream in the pose schema

    class BodyPart(dj.Part):
        """Body parts associated with a given model

        Attributes:
            body_part ( varchar(32) ): Short name. Also called joint.
            body_part_description ( varchar(1000) ): Optional. Longer description."""

        # Pure association part table: one row per (model, body part) pair.
        definition = """
        -> master
        -> BodyPart
        """

    @classmethod
    def insert_new_model(
        cls,
        model_name: str,
        dlc_config,
        *,
        shuffle: int,
        trainingsetindex,
        model_description="",
        model_prefix="",
        paramset_idx: int = None,
        prompt=True,
        params=None,
    ):
        """Insert new model into the dlc.Model table.

        Args:
            model_name (str): User-friendly name for this model.
            dlc_config ( varchar(255) ): Path to a config.y*ml.
            shuffle (int): Which shuffle of the training dataset.
            trainingsetindex (int): Index of training fraction list in config.yaml.
            model_description (str): Optional. Description of this model.
            model_prefix (str): Optional. Filename prefix used across DLC project
            paramset_idx (int): Optional. Index from the TrainingParamSet table
            prompt (bool): Optional. Prompt the user with all info before inserting.
            params (dict): Optional. If dlc_config is path, dict of override items
        """

        # Deferred import keeps DeepLabCut optional at module-load time.
        from deeplabcut.utils.auxiliaryfunctions import GetScorerName  # isort:skip

        # handle dlc_config being a yaml file
        dlc_config_fp = find_full_path(get_dlc_root_data_dir(), Path(dlc_config))
        assert dlc_config_fp.exists(), (
            "dlc_config is not a filepath" + f"\n Check: {dlc_config_fp}"
        )
        if dlc_config_fp.suffix in (".yml", ".yaml"):
            yaml = YAML(typ="safe", pure=True)
            with open(dlc_config_fp, "rb") as f:
                dlc_config = yaml.load(f)
        # Caller-supplied params override values loaded from the yaml file.
        if isinstance(params, dict):
            dlc_config.update(params)

        # ---- Get and resolve project path ----
        project_path = dlc_config_fp.parent
        dlc_config["project_path"] = project_path.as_posix()  # update if different
        root_dir = find_root_directory(get_dlc_root_data_dir(), project_path)

        # ---- Verify config ----
        # Fail early if any attribute required by the insert below is missing.
        needed_attributes = [
            "Task",
            "date",
            "iteration",
            "snapshotindex",
            "TrainingFraction",
        ]
        for attribute in needed_attributes:
            assert attribute in dlc_config, f"Couldn't find {attribute} in config"

        # ---- Get scorer name ----
        # "or 'f'" below covers case where config returns None. str_to_bool handles else
        scorer_legacy = str_to_bool(dlc_config.get("scorer_legacy", "f"))

        # The boolean scorer_legacy indexes GetScorerName's returned pair
        # (False -> 0, True -> 1) — presumably (modern, legacy); TODO confirm.
        dlc_scorer = GetScorerName(
            cfg=dlc_config,
            shuffle=shuffle,
            trainFraction=dlc_config["TrainingFraction"][int(trainingsetindex)],
            modelprefix=model_prefix,
        )[scorer_legacy]
        # snapshotindex of -1 means "latest": drop the trailing snapshot token.
        # NOTE(review): "".join removes the underscores between the remaining
        # tokens — confirm "_".join(...) was not intended here.
        if dlc_config["snapshotindex"] == -1:
            dlc_scorer = "".join(dlc_scorer.split("_")[:-1])

        # ---- Insert ----
        model_dict = {
            "model_name": model_name,
            "model_description": model_description,
            "scorer": dlc_scorer,
            "task": dlc_config["Task"],
            "date": dlc_config["date"],
            "iteration": dlc_config["iteration"],
            "snapshotindex": dlc_config["snapshotindex"],
            "shuffle": shuffle,
            "trainingsetindex": int(trainingsetindex),
            "project_path": project_path.relative_to(root_dir).as_posix(),
            "paramset_idx": paramset_idx,
            "config_template": dlc_config,
        }

        # -- prompt for confirmation --
        if prompt:
            print("--- DLC Model specification to be inserted ---")
            for k, v in model_dict.items():
                if k != "config_template":
                    print("\t{}: {}".format(k, v))
                else:
                    print("\t-- Template/Contents of config.yaml --")
                    for k, v in model_dict["config_template"].items():
                        print("\t\t{}: {}".format(k, v))

        if (
            prompt
            and dj.utils.user_choice("Proceed with new DLC model insert?") != "yes"
        ):
            print("Canceled insert.")
            return

        def _do_insert():
            # Insert the model row, register any new body parts, then link
            # every config body part to this model via the part table.
            cls.insert1(model_dict)
            # Returns array, so check size for unambiguous truth value
            if BodyPart.extract_new_body_parts(dlc_config, verbose=False).size > 0:
                BodyPart.insert_from_config(dlc_config, prompt=prompt)
            cls.BodyPart.insert((model_name, bp) for bp in dlc_config["bodyparts"])

        # ____ Insert into table ----
        # Reuse an open transaction if one exists; otherwise wrap the three
        # inserts in a new transaction so they commit (or fail) atomically.
        if cls.connection.in_transaction:
            _do_insert()
        else:
            with cls.connection.transaction:
                _do_insert()

BodyPart

Bases: Part

Body parts associated with a given model

Attributes:

Name Type Description
body_part varchar(32)

Short name. Also called joint.

body_part_description varchar(1000)

Optional. Longer description.

Source code in element_deeplabcut/model.py
339
340
341
342
343
344
345
346
347
348
349
class BodyPart(dj.Part):
    """Body parts associated with a given model

    Attributes:
        body_part ( varchar(32) ): Short name. Also called joint.
        body_part_description ( varchar(1000) ): Optional. Longer description."""

    # Pure association part table: one row per (model, body part) pair.
    definition = """
    -> master
    -> BodyPart
    """

insert_new_model(model_name, dlc_config, *, shuffle, trainingsetindex, model_description='', model_prefix='', paramset_idx=None, prompt=True, params=None) classmethod

Insert new model into the dlc.Model table.

Parameters:

Name Type Description Default
model_name str

User-friendly name for this model.

required
dlc_config varchar(255)

Path to a config.y*ml.

required
shuffle int

Which shuffle of the training dataset.

required
trainingsetindex int

Index of training fraction list in config.yaml.

required
model_description str

Optional. Description of this model.

''
model_prefix str

Optional. Filename prefix used across DLC project

''
paramset_idx int

Optional. Index from the TrainingParamSet table

None
prompt bool

Optional. Prompt the user with all info before inserting.

True
params dict

Optional. If dlc_config is path, dict of override items

None
Source code in element_deeplabcut/model.py
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
@classmethod
def insert_new_model(
    cls,
    model_name: str,
    dlc_config,
    *,
    shuffle: int,
    trainingsetindex,
    model_description="",
    model_prefix="",
    paramset_idx: int = None,
    prompt=True,
    params=None,
):
    """Insert new model into the dlc.Model table.

    Args:
        model_name (str): User-friendly name for this model.
        dlc_config ( varchar(255) ): Path to a config.y*ml.
        shuffle (int): Which shuffle of the training dataset.
        trainingsetindex (int): Index of training fraction list in config.yaml.
        model_description (str): Optional. Description of this model.
        model_prefix (str): Optional. Filename prefix used across DLC project
        paramset_idx (int): Optional. Index from the TrainingParamSet table
        prompt (bool): Optional. Prompt the user with all info before inserting.
        params (dict): Optional. If dlc_config is path, dict of override items

    Raises:
        AssertionError: If dlc_config cannot be resolved to an existing file,
            or if a required key is missing from the loaded config.
    """

    from deeplabcut.utils.auxiliaryfunctions import GetScorerName  # isort:skip

    # handle dlc_config being a yaml file
    dlc_config_fp = find_full_path(get_dlc_root_data_dir(), Path(dlc_config))
    # NOTE(review): assert statements are stripped under `python -O`; an
    # explicit raise would be more robust for input validation.
    assert dlc_config_fp.exists(), (
        "dlc_config is not a filepath" + f"\n Check: {dlc_config_fp}"
    )
    if dlc_config_fp.suffix in (".yml", ".yaml"):
        yaml = YAML(typ="safe", pure=True)
        with open(dlc_config_fp, "rb") as f:
            dlc_config = yaml.load(f)
    if isinstance(params, dict):
        # caller-supplied overrides take precedence over the file contents
        dlc_config.update(params)

    # ---- Get and resolve project path ----
    project_path = dlc_config_fp.parent
    dlc_config["project_path"] = project_path.as_posix()  # update if different
    root_dir = find_root_directory(get_dlc_root_data_dir(), project_path)

    # ---- Verify config ----
    needed_attributes = [
        "Task",
        "date",
        "iteration",
        "snapshotindex",
        "TrainingFraction",
    ]
    for attribute in needed_attributes:
        assert attribute in dlc_config, f"Couldn't find {attribute} in config"

    # ---- Get scorer name ----
    # "or 'f'" below covers case where config returns None. str_to_bool handles else
    scorer_legacy = str_to_bool(dlc_config.get("scorer_legacy", "f"))

    # GetScorerName returns a pair of scorer names; the bool above indexes
    # into it (legacy vs current) — TODO confirm ordering for the DLC
    # version in use.
    dlc_scorer = GetScorerName(
        cfg=dlc_config,
        shuffle=shuffle,
        trainFraction=dlc_config["TrainingFraction"][int(trainingsetindex)],
        modelprefix=model_prefix,
    )[scorer_legacy]
    if dlc_config["snapshotindex"] == -1:
        # NOTE(review): "".join (no separator) also removes the underscores
        # between the remaining parts — verify this matches DLC's naming.
        dlc_scorer = "".join(dlc_scorer.split("_")[:-1])

    # ---- Insert ----
    model_dict = {
        "model_name": model_name,
        "model_description": model_description,
        "scorer": dlc_scorer,
        "task": dlc_config["Task"],
        "date": dlc_config["date"],
        "iteration": dlc_config["iteration"],
        "snapshotindex": dlc_config["snapshotindex"],
        "shuffle": shuffle,
        "trainingsetindex": int(trainingsetindex),
        "project_path": project_path.relative_to(root_dir).as_posix(),
        "paramset_idx": paramset_idx,
        "config_template": dlc_config,
    }

    # -- prompt for confirmation --
    if prompt:
        print("--- DLC Model specification to be inserted ---")
        for k, v in model_dict.items():
            if k != "config_template":
                print("\t{}: {}".format(k, v))
            else:
                print("\t-- Template/Contents of config.yaml --")
                for k, v in model_dict["config_template"].items():
                    print("\t\t{}: {}".format(k, v))

    if (
        prompt
        and dj.utils.user_choice("Proceed with new DLC model insert?") != "yes"
    ):
        print("Canceled insert.")
        return

    # Insert master row plus body parts as one unit of work
    def _do_insert():
        cls.insert1(model_dict)
        # Returns array, so check size for unambiguous truth value
        if BodyPart.extract_new_body_parts(dlc_config, verbose=False).size > 0:
            BodyPart.insert_from_config(dlc_config, prompt=prompt)
        cls.BodyPart.insert((model_name, bp) for bp in dlc_config["bodyparts"])

    # ---- Insert into table ----
    # Reuse an already-open transaction if the caller started one
    if cls.connection.in_transaction:
        _do_insert()
    else:
        with cls.connection.transaction:
            _do_insert()

ModelEvaluation

Bases: Computed

Performance characteristics of a model calculated by deeplabcut.evaluate_network

Attributes:

Name Type Description
Model foreign key

Model name.

train_iterations int

Training iterations.

train_error float

Optional. Train error (px).

test_error float

Optional. Test error (px).

p_cutoff float

Optional. p-cutoff used.

train_error_p float

Optional. Train error with p-cutoff.

test_error_p float

Optional. Test error with p-cutoff.

Source code in element_deeplabcut/model.py
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
@schema
class ModelEvaluation(dj.Computed):
    """Performance characteristics of a model calculated by `deeplabcut.evaluate_network`

    Attributes:
        Model (foreign key): Model name.
        train_iterations (int): Training iterations.
        train_error (float): Optional. Train error (px).
        test_error (float): Optional. Test error (px).
        p_cutoff (float): Optional. p-cutoff used.
        train_error_p (float): Optional. Train error with p-cutoff.
        test_error_p (float): Optional. Test error with p-cutoff."""

    definition = """
    -> Model
    ---
    train_iterations   : int   # Training iterations
    train_error=null   : float # Train error (px)
    test_error=null    : float # Test error (px)
    p_cutoff=null      : float # p-cutoff used
    train_error_p=null : float # Train error with p-cutoff
    test_error_p=null  : float # Test error with p-cutoff
    """

    def make(self, key):
        """.populate() method will launch evaluation for each unique entry in Model."""
        from deeplabcut import evaluate_network  # isort:skip
        from deeplabcut.utils.auxiliaryfunctions import (
            get_evaluation_folder,
        )  # isort:skip

        dlc_config, project_path, model_prefix, shuffle, trainingsetindex = (
            Model & key
        ).fetch1(
            "config_template",
            "project_path",
            "model_prefix",
            "shuffle",
            "trainingsetindex",
        )

        project_path = find_full_path(get_dlc_root_data_dir(), project_path)
        yml_path, _ = dlc_reader.read_yaml(project_path)

        evaluate_network(
            yml_path,
            Shuffles=[shuffle],  # this needs to be a list
            trainingsetindex=trainingsetindex,
            comparisonbodyparts="all",
        )

        eval_folder = get_evaluation_folder(
            trainFraction=dlc_config["TrainingFraction"][trainingsetindex],
            shuffle=shuffle,
            cfg=dlc_config,
            modelprefix=model_prefix,
        )
        eval_path = project_path / eval_folder
        assert eval_path.exists(), f"Couldn't find evaluation folder:\n{eval_path}"

        eval_csvs = list(eval_path.glob("*csv"))
        if not eval_csvs:
            # Previously this fell through to an UnboundLocalError below
            raise FileNotFoundError(
                f"Couldn't find any evaluation csv in:\n{eval_path}"
            )
        # BUGFIX: the original loop never updated max_modified_time, so it
        # effectively selected the last file in glob order rather than the
        # most recently modified csv. Select by mtime explicitly.
        eval_csv_latest = max(eval_csvs, key=os.path.getmtime)
        with open(eval_csv_latest, newline="") as f:
            # First row of DLC's evaluation csv holds the summary metrics
            results = list(csv.DictReader(f, delimiter=","))[0]
        # in testing, test_error_p returned empty string
        self.insert1(
            dict(
                key,
                train_iterations=results["Training iterations:"],
                train_error=results[" Train error(px)"],
                test_error=results[" Test error(px)"],
                p_cutoff=results["p-cutoff used"],
                train_error_p=results["Train error with p-cutoff"],
                test_error_p=results["Test error with p-cutoff"],
            )
        )

PoseEstimationTask

Bases: Manual

Staging table for pairing of video recording and model before inference.

Attributes:

Name Type Description
VideoRecording foreign key

Video recording key.

Model foreign key

Model name.

task_mode load or trigger

Optional. Default load. Or trigger computation.

pose_estimation_output_dir varchar(255)

Optional. Output dir relative to get_dlc_root_data_dir.

pose_estimation_params longblob

Optional. Params for DLC's analyze_videos params, if not default.

Source code in element_deeplabcut/model.py
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
@schema
class PoseEstimationTask(dj.Manual):
    """Staging table for pairing of video recording and model before inference.

    Attributes:
        VideoRecording (foreign key): Video recording key.
        Model (foreign key): Model name.
        task_mode (load or trigger): Optional. Default load. Or trigger computation.
        pose_estimation_output_dir ( varchar(255) ): Optional. Output dir relative to
                                                     get_dlc_root_data_dir.
        pose_estimation_params (longblob): Optional. Params for DLC's analyze_videos
                                           params, if not default."""

    definition = """
    -> VideoRecording                           # Session -> Recording + File part table
    -> Model                                    # Must specify a DLC project_path
    ---
    task_mode='load' : enum('load', 'trigger')  # load results or trigger computation
    pose_estimation_output_dir='': varchar(255) # output dir relative to the root dir
    pose_estimation_params=null  : longblob     # analyze_videos params, if not default
    """

    @classmethod
    def infer_output_dir(cls, key: dict, relative: bool = False, mkdir: bool = False):
        """Return the expected pose_estimation_output_dir.

        Spaces in model name are replaced with hyphens.
        Based on convention: / video_dir / device_{}_recording_{}_model_{}

        Args:
            key: DataJoint key specifying a pairing of VideoRecording and Model.
            relative (bool): Report directory relative to get_dlc_processed_data_dir().
            mkdir (bool): Default False. Make directory if it doesn't exist.
        """
        # Locate one recording file to anchor the video directory
        video_filepath = find_full_path(
            get_dlc_root_data_dir(),
            (VideoRecording.File & key).fetch("file_path", limit=1)[0],
        )
        root_dir = find_root_directory(get_dlc_root_data_dir(), video_filepath.parent)
        recording_key = VideoRecording & key
        # Concatenate the device's primary-key values into a single label
        device = "-".join(
            str(v)
            for v in (_linking_module.Device & recording_key).fetch1("KEY").values()
        )
        if get_dlc_processed_data_dir():
            processed_dir = Path(get_dlc_processed_data_dir())
        else:  # if processed not provided, default to where video is
            processed_dir = root_dir

        output_dir = (
            processed_dir
            / video_filepath.parent.relative_to(root_dir)
            / (
                f'device_{device}_recording_{key["recording_id"]}_model_'
                + key["model_name"].replace(" ", "-")
            )
        )
        if mkdir:
            output_dir.mkdir(parents=True, exist_ok=True)
        return output_dir.relative_to(processed_dir) if relative else output_dir

    @classmethod
    def generate(
        cls,
        video_recording_key: dict,
        model_name: str,
        *,
        task_mode: str = None,
        analyze_videos_params: dict = None,
    ):
        """Insert PoseEstimationTask in inferred output dir.

        Based on the convention / video_dir / device_{}_recording_{}_model_{}

        Args:
            video_recording_key (dict): DataJoint key specifying a VideoRecording.

            model_name (str): Name of DLC model (from Model table) to be used for inference.
            task_mode (str): Optional. 'load' or 'trigger'. If None (default),
                set to 'load' when existing results are found in the output
                dir, otherwise 'trigger'.
            analyze_videos_params (dict): Optional. Parameters passed to DLC's analyze_videos:
                videotype, gputouse, save_as_csv, batchsize, cropping, TFGPUinference,
                dynamic, robust_nframes, allow_growth, use_shelve
        """
        # NOTE(review): relative_to(processed_dir) below assumes
        # get_dlc_processed_data_dir() is configured (non-None) — verify.
        processed_dir = get_dlc_processed_data_dir()
        output_dir = cls.infer_output_dir(
            {**video_recording_key, "model_name": model_name},
            relative=False,
            mkdir=True,
        )

        if task_mode is None:
            # Auto-detect: loadable results present -> 'load', else 'trigger'
            try:
                _ = dlc_reader.PoseEstimation(output_dir)
            except FileNotFoundError:
                task_mode = "trigger"
            else:
                task_mode = "load"

        cls.insert1(
            {
                **video_recording_key,
                "model_name": model_name,
                "task_mode": task_mode,
                "pose_estimation_params": analyze_videos_params,
                "pose_estimation_output_dir": output_dir.relative_to(
                    processed_dir
                ).as_posix(),
            }
        )

    # Alias for generate (kept for backward compatibility)
    insert_estimation_task = generate

infer_output_dir(key, relative=False, mkdir=False) classmethod

Return the expected pose_estimation_output_dir.

Spaces in model name are replaced with hyphens. Based on convention: / video_dir / device_{}_recording_{}_model_{}

Parameters:

Name Type Description Default
key dict

DataJoint key specifying a pairing of VideoRecording and Model.

required
relative bool

Report directory relative to get_dlc_processed_data_dir().

False
mkdir bool

Default False. Make directory if it doesn't exist.

False
Source code in element_deeplabcut/model.py
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
@classmethod
def infer_output_dir(cls, key: dict, relative: bool = False, mkdir: bool = False):
    """Return the expected pose_estimation_output_dir.

    Spaces in model name are replaced with hyphens.
    Based on convention: / video_dir / device_{}_recording_{}_model_{}

    Args:
        key: DataJoint key specifying a pairing of VideoRecording and Model.
        relative (bool): Report directory relative to get_dlc_processed_data_dir().
        mkdir (bool): Default False. Make directory if it doesn't exist.
    """
    # Locate one recording file to anchor the video directory
    video_filepath = find_full_path(
        get_dlc_root_data_dir(),
        (VideoRecording.File & key).fetch("file_path", limit=1)[0],
    )
    root_dir = find_root_directory(get_dlc_root_data_dir(), video_filepath.parent)
    recording_key = VideoRecording & key
    # Concatenate the device's primary-key values into a single label
    device = "-".join(
        str(v)
        for v in (_linking_module.Device & recording_key).fetch1("KEY").values()
    )
    if get_dlc_processed_data_dir():
        processed_dir = Path(get_dlc_processed_data_dir())
    else:  # if processed not provided, default to where video is
        processed_dir = root_dir

    output_dir = (
        processed_dir
        / video_filepath.parent.relative_to(root_dir)
        / (
            f'device_{device}_recording_{key["recording_id"]}_model_'
            + key["model_name"].replace(" ", "-")
        )
    )
    if mkdir:
        output_dir.mkdir(parents=True, exist_ok=True)
    return output_dir.relative_to(processed_dir) if relative else output_dir

generate(video_recording_key, model_name, *, task_mode=None, analyze_videos_params=None) classmethod

Insert PoseEstimationTask in inferred output dir.

Based on the convention / video_dir / device_{}_recording_{}_model_{}

Parameters:

Name Type Description Default
video_recording_key dict

DataJoint key specifying a VideoRecording.

required
model_name str

Name of DLC model (from Model table) to be used for inference.

required
task_mode str

Default 'trigger' computation. Or 'load' existing results.

None
analyze_videos_params dict

Optional. Parameters passed to DLC's analyze_videos: videotype, gputouse, save_as_csv, batchsize, cropping, TFGPUinference, dynamic, robust_nframes, allow_growth, use_shelve

None
Source code in element_deeplabcut/model.py
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
@classmethod
def generate(
    cls,
    video_recording_key: dict,
    model_name: str,
    *,
    task_mode: str = None,
    analyze_videos_params: dict = None,
):
    """Insert PoseEstimationTask in inferred output dir.

    Based on the convention / video_dir / device_{}_recording_{}_model_{}

    Args:
        video_recording_key (dict): DataJoint key specifying a VideoRecording.

        model_name (str): Name of DLC model (from Model table) to be used for inference.
        task_mode (str): Optional. 'load' or 'trigger'. If None (default), set
            to 'load' when existing results are found in the output dir,
            otherwise 'trigger'.
        analyze_videos_params (dict): Optional. Parameters passed to DLC's analyze_videos:
            videotype, gputouse, save_as_csv, batchsize, cropping, TFGPUinference,
            dynamic, robust_nframes, allow_growth, use_shelve
    """
    # NOTE(review): relative_to(processed_dir) below assumes
    # get_dlc_processed_data_dir() is configured (non-None) — verify.
    processed_dir = get_dlc_processed_data_dir()
    output_dir = cls.infer_output_dir(
        {**video_recording_key, "model_name": model_name},
        relative=False,
        mkdir=True,
    )

    if task_mode is None:
        # Auto-detect: loadable results present -> 'load', else 'trigger'
        try:
            _ = dlc_reader.PoseEstimation(output_dir)
        except FileNotFoundError:
            task_mode = "trigger"
        else:
            task_mode = "load"

    cls.insert1(
        {
            **video_recording_key,
            "model_name": model_name,
            "task_mode": task_mode,
            "pose_estimation_params": analyze_videos_params,
            "pose_estimation_output_dir": output_dir.relative_to(
                processed_dir
            ).as_posix(),
        }
    )

PoseEstimation

Bases: Computed

Results of pose estimation.

Attributes:

Name Type Description
PoseEstimationTask foreign key

Pose Estimation Task key.

pose_estimation_time datetime

time of generation of this set of DLC results.

Source code in element_deeplabcut/model.py
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
@schema
class PoseEstimation(dj.Computed):
    """Results of pose estimation.

    Attributes:
        PoseEstimationTask (foreign key): Pose Estimation Task key.
        pose_estimation_time (datetime): time of generation of this set of DLC results.
    """

    definition = """
    -> PoseEstimationTask
    ---
    pose_estimation_time: datetime  # time of generation of this set of DLC results
    """

    class BodyPartPosition(dj.Part):
        """Position of individual body parts by frame index

        Attributes:
            PoseEstimation (foreign key): Pose Estimation key.
            Model.BodyPart (foreign key): Body Part key.
            frame_index (longblob): Frame index in model.
            x_pos (longblob): X position.
            y_pos (longblob): Y position.
            z_pos (longblob): Optional. Z position.
            likelihood (longblob): Model confidence."""

        definition = """ # uses DeepLabCut h5 output for body part position
        -> master
        -> Model.BodyPart
        ---
        frame_index : longblob     # frame index in model
        x_pos       : longblob
        y_pos       : longblob
        z_pos=null  : longblob
        likelihood  : longblob
        """

    def make(self, key):
        """.populate() method will launch pose estimation for each PoseEstimationTask"""
        # ID model and directories
        dlc_model = (Model & key).fetch1()
        task_mode, output_dir = (PoseEstimationTask & key).fetch1(
            "task_mode", "pose_estimation_output_dir"
        )
        if not output_dir:
            output_dir = PoseEstimationTask.infer_output_dir(
                key, relative=True, mkdir=True
            )
            # update pose_estimation_output_dir
            PoseEstimationTask.update1(
                {**key, "pose_estimation_output_dir": output_dir.as_posix()}
            )
        output_dir = find_full_path(get_dlc_root_data_dir(), output_dir)

        # Trigger PoseEstimation
        if task_mode == "trigger":
            # Triggering dlc for pose estimation required:
            # - project_path: full path to the directory containing the trained model
            # - video_filepaths: full paths to the video files for inference
            # - analyze_video_params: optional parameters to analyze video
            project_path = find_full_path(
                get_dlc_root_data_dir(), dlc_model["project_path"]
            )
            video_filepaths = [
                find_full_path(get_dlc_root_data_dir(), fp).as_posix()
                for fp in (VideoRecording.File & key).fetch("file_path")
            ]
            analyze_video_params = (PoseEstimationTask & key).fetch1(
                "pose_estimation_params"
            ) or {}

            dlc_reader.do_pose_estimation(
                key,
                video_filepaths,
                dlc_model,
                project_path,
                output_dir,
                **analyze_video_params,
            )

        dlc_result = dlc_reader.PoseEstimation(output_dir)
        creation_time = datetime.fromtimestamp(dlc_result.creation_time).strftime(
            "%Y-%m-%d %H:%M:%S"
        )

        # z is absent for 2D models, so z_pos is stored as null in that case
        body_parts = [
            {
                **key,
                "body_part": k,
                "frame_index": np.arange(dlc_result.nframes),
                "x_pos": v["x"],
                "y_pos": v["y"],
                "z_pos": v.get("z"),
                "likelihood": v["likelihood"],
            }
            for k, v in dlc_result.data.items()
        ]

        self.insert1({**key, "pose_estimation_time": creation_time})
        self.BodyPartPosition.insert(body_parts)

    @classmethod
    def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame:
        """Returns a pandas dataframe of coordinates of the specified body_part(s)

        Args:
            key (dict): A DataJoint query specifying one PoseEstimation entry.
            body_parts (list, optional): Body parts as a list, or a single
                body part as a str. If "all" (default), all joints.

        Returns:
            df: multi index pandas dataframe with DLC scorer names, body_parts
                and x/y coordinates of each joint name for a camera_id, similar to
                 output of DLC dataframe. If 2D, z is set of zeros
        """
        model_name = key["model_name"]

        if body_parts == "all":
            body_parts = (cls.BodyPartPosition & key).fetch("body_part")
        elif isinstance(body_parts, str):
            # BUGFIX: list("nose") would split a single body-part name into
            # characters; wrap it in a list instead.
            body_parts = [body_parts]
        elif not isinstance(body_parts, list):
            body_parts = list(body_parts)

        df = None
        for body_part in body_parts:
            # BUGFIX: restrict by `key` as well — previously the query matched
            # this body part across ALL pose estimations, so fetch1 could fail
            # (or return the wrong entry) once multiple sessions were present.
            x_pos, y_pos, z_pos, likelihood = (
                cls.BodyPartPosition & key & {"body_part": body_part}
            ).fetch1("x_pos", "y_pos", "z_pos", "likelihood")
            if z_pos is None:
                # BUGFIX: `not z_pos` raised ValueError for multi-element
                # arrays (3D results); only substitute zeros when z is null.
                z_pos = np.zeros_like(x_pos)

            a = np.vstack((x_pos, y_pos, z_pos, likelihood))
            a = a.T
            pdindex = pd.MultiIndex.from_product(
                [[model_name], [body_part], ["x", "y", "z", "likelihood"]],
                names=["scorer", "bodyparts", "coords"],
            )
            frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))
            df = pd.concat([df, frame], axis=1)
        return df

BodyPartPosition

Bases: Part

Position of individual body parts by frame index

Attributes:

Name Type Description
PoseEstimation foreign key

Pose Estimation key.

Model.BodyPart foreign key

Body Part key.

frame_index longblob

Frame index in model.

x_pos longblob

X position.

y_pos longblob

Y position.

z_pos longblob

Optional. Z position.

likelihood longblob

Model confidence.

Source code in element_deeplabcut/model.py
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
class BodyPartPosition(dj.Part):
    """Position of individual body parts by frame index

    Attributes:
        PoseEstimation (foreign key): Pose Estimation key.
        Model.BodyPart (foreign key): Body Part key.
        frame_index (longblob): Frame index in model.
        x_pos (longblob): X position.
        y_pos (longblob): Y position.
        z_pos (longblob): Optional. Z position. Null for 2D models
            (PoseEstimation.make inserts z_pos=v.get("z")).
        likelihood (longblob): Model confidence."""

    definition = """ # uses DeepLabCut h5 output for body part position
    -> master
    -> Model.BodyPart
    ---
    frame_index : longblob     # frame index in model
    x_pos       : longblob
    y_pos       : longblob
    z_pos=null  : longblob
    likelihood  : longblob
    """

make(key)

.populate() method will launch training for each PoseEstimationTask

Source code in element_deeplabcut/model.py
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
def make(self, key):
    """.populate() method will launch pose estimation for each PoseEstimationTask"""
    # ID model and directories
    dlc_model = (Model & key).fetch1()
    task_mode, output_dir = (PoseEstimationTask & key).fetch1(
        "task_mode", "pose_estimation_output_dir"
    )
    if not output_dir:
        output_dir = PoseEstimationTask.infer_output_dir(
            key, relative=True, mkdir=True
        )
        # update pose_estimation_output_dir
        PoseEstimationTask.update1(
            {**key, "pose_estimation_output_dir": output_dir.as_posix()}
        )
    output_dir = find_full_path(get_dlc_root_data_dir(), output_dir)

    # Trigger PoseEstimation
    if task_mode == "trigger":
        # Triggering dlc for pose estimation required:
        # - project_path: full path to the directory containing the trained model
        # - video_filepaths: full paths to the video files for inference
        # - analyze_video_params: optional parameters to analyze video
        project_path = find_full_path(
            get_dlc_root_data_dir(), dlc_model["project_path"]
        )
        video_filepaths = [
            find_full_path(get_dlc_root_data_dir(), fp).as_posix()
            for fp in (VideoRecording.File & key).fetch("file_path")
        ]
        analyze_video_params = (PoseEstimationTask & key).fetch1(
            "pose_estimation_params"
        ) or {}

        dlc_reader.do_pose_estimation(
            key,
            video_filepaths,
            dlc_model,
            project_path,
            output_dir,
            **analyze_video_params,
        )

    # In 'load' mode, skip straight to reading existing results
    dlc_result = dlc_reader.PoseEstimation(output_dir)
    creation_time = datetime.fromtimestamp(dlc_result.creation_time).strftime(
        "%Y-%m-%d %H:%M:%S"
    )

    # z is absent for 2D models, so z_pos is stored as null in that case
    body_parts = [
        {
            **key,
            "body_part": k,
            "frame_index": np.arange(dlc_result.nframes),
            "x_pos": v["x"],
            "y_pos": v["y"],
            "z_pos": v.get("z"),
            "likelihood": v["likelihood"],
        }
        for k, v in dlc_result.data.items()
    ]

    self.insert1({**key, "pose_estimation_time": creation_time})
    self.BodyPartPosition.insert(body_parts)

get_trajectory(key, body_parts='all') classmethod

Returns a pandas dataframe of coordinates of the specified body_part(s)

Parameters:

Name Type Description Default
key dict

A DataJoint query specifying one PoseEstimation entry.

required
body_parts list

Body parts as a list. If "all", all joints

'all'

Returns:

Name Type Description
df DataFrame

multi index pandas dataframe with DLC scorer names, body_parts and x/y coordinates of each joint name for a camera_id, similar to output of DLC dataframe. If 2D, z is set of zeros

Source code in element_deeplabcut/model.py
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
@classmethod
def get_trajectory(cls, key: dict, body_parts: list = "all") -> pd.DataFrame:
    """Returns a pandas dataframe of coordinates of the specified body_part(s)

    Args:
        key (dict): A DataJoint query specifying one PoseEstimation entry.
        body_parts (list, optional): Body parts as a list, or a single body
            part as a str. If "all" (default), all joints.

    Returns:
        df: multi index pandas dataframe with DLC scorer names, body_parts
            and x/y coordinates of each joint name for a camera_id, similar to
             output of DLC dataframe. If 2D, z is set of zeros
    """
    model_name = key["model_name"]

    if body_parts == "all":
        body_parts = (cls.BodyPartPosition & key).fetch("body_part")
    elif isinstance(body_parts, str):
        # BUGFIX: list("nose") would split a single body-part name into
        # characters; wrap it in a list instead.
        body_parts = [body_parts]
    elif not isinstance(body_parts, list):
        body_parts = list(body_parts)

    df = None
    for body_part in body_parts:
        # BUGFIX: restrict by `key` as well — previously the query matched
        # this body part across ALL pose estimations, so fetch1 could fail
        # (or return the wrong entry) once multiple sessions were present.
        x_pos, y_pos, z_pos, likelihood = (
            cls.BodyPartPosition & key & {"body_part": body_part}
        ).fetch1("x_pos", "y_pos", "z_pos", "likelihood")
        if z_pos is None:
            # BUGFIX: `not z_pos` raised ValueError for multi-element arrays
            # (3D results); only substitute zeros when z is null.
            z_pos = np.zeros_like(x_pos)

        a = np.vstack((x_pos, y_pos, z_pos, likelihood))
        a = a.T
        pdindex = pd.MultiIndex.from_product(
            [[model_name], [body_part], ["x", "y", "z", "likelihood"]],
            names=["scorer", "bodyparts", "coords"],
        )
        frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))
        df = pd.concat([df, frame], axis=1)
    return df

str_to_bool(value)

Return whether the provided string represents true. Otherwise false.

Parameters:

Name Type Description Default
value any

Any input

required

Returns:

Name Type Description
bool bool

True if value in ("y", "yes", "t", "true", "on", "1")

Source code in element_deeplabcut/model.py
807
808
809
810
811
812
813
814
815
816
817
818
819
820
def str_to_bool(value) -> bool:
    """Interpret *value* as a boolean.

    Args:
        value (any): Any input.

    Returns:
        bool (bool): True if value in ("y", "yes", "t", "true", "on", "1")
    """
    # distutils.util.strtobool was deprecated in 3.10; this mirrors the
    # approach in github.com/PostHog/posthog/blob/master/posthog/utils.py
    truthy_tokens = ("y", "yes", "t", "true", "on", "1")
    return bool(value) and str(value).lower() in truthy_tokens