Directory
Classes
def ApproveCondition(
signal_id: str,
):
Represents a dependency on a signal from a user.
Parameter |
Type |
signal_id |
str |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object: flyteidl.core.workflow_pb2.ApproveCondition,
):
Parameter |
Type |
pb2_object |
flyteidl.core.workflow_pb2.ApproveCondition |
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
is_empty |
|
|
signal_id |
|
|
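The model above is a thin wrapper around the corresponding protobuf message. A minimal sketch of constructing it and round-tripping through the IDL representation (the signal id is an illustrative placeholder):
.. code-block:: python

    from flytekit.models.core.workflow import ApproveCondition

    # Build the model with an illustrative signal id.
    cond = ApproveCondition(signal_id="approve-deploy")

    # Convert to the flyteidl protobuf message and back again.
    pb = cond.to_flyte_idl()
    roundtrip = ApproveCondition.from_flyte_idl(pb)
    assert roundtrip.signal_id == "approve-deploy"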
def ArrayNode(
target: typing.Union[flytekit.core.launch_plan.LaunchPlan, flytekit.core.task.ReferenceTask, ForwardRef('FlyteLaunchPlan')],
bindings: typing.Optional[typing.List[flytekit.models.literals.Binding]],
concurrency: typing.Optional[int],
min_successes: typing.Optional[int],
min_success_ratio: typing.Optional[float],
metadata: typing.Optional[flytekit.models.core.workflow.NodeMetadata],
):
Parameter |
Type |
target |
typing.Union[flytekit.core.launch_plan.LaunchPlan, flytekit.core.task.ReferenceTask, ForwardRef('FlyteLaunchPlan')] |
bindings |
typing.Optional[typing.List[flytekit.models.literals.Binding]] |
concurrency |
typing.Optional[int] |
min_successes |
typing.Optional[int] |
min_success_ratio |
typing.Optional[float] |
metadata |
typing.Optional[flytekit.models.core.workflow.NodeMetadata] |
Methods
def construct_node_metadata()
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
Properties
Property |
Type |
Description |
bindings |
|
|
bound_inputs |
|
|
concurrency |
|
|
data_mode |
|
|
execution_mode |
|
|
flyte_entity |
|
|
interface |
|
|
is_original_sub_node_interface |
|
|
min_success_ratio |
|
|
min_successes |
|
|
name |
|
|
python_interface |
|
|
upstream_nodes |
|
|
Base class for all tasks with a Python native Interface. This should be used directly for task types that do not have a Python function to be executed. Otherwise refer to :py:class:flytekit.PythonFunctionTask.
def ArrayNodeMapTask(
python_function_task: typing.Union[flytekit.core.python_function_task.PythonFunctionTask, flytekit.core.python_function_task.PythonInstanceTask, functools.partial],
concurrency: typing.Optional[int],
min_successes: typing.Optional[int],
min_success_ratio: typing.Optional[float],
bound_inputs: typing.Optional[typing.Set[str]],
bound_inputs_values: typing.Optional[typing.Dict[str, typing.Any]],
kwargs,
):
Parameter |
Type |
python_function_task |
typing.Union[flytekit.core.python_function_task.PythonFunctionTask, flytekit.core.python_function_task.PythonInstanceTask, functools.partial] |
concurrency |
typing.Optional[int] |
min_successes |
typing.Optional[int] |
min_success_ratio |
typing.Optional[float] |
bound_inputs |
typing.Optional[typing.Set[str]] |
bound_inputs_values |
typing.Optional[typing.Dict[str, typing.Any]] |
kwargs |
**kwargs |
Methods
Method |
Description |
compile() |
Generates a node that encapsulates this task in a workflow definition |
construct_node_metadata() |
This returns metadata for the parent ArrayNode, not the sub-node getting mapped over |
dispatch_execute() |
This method translates Flyte’s Type system based input values and invokes the actual call to the executor |
execute() |
This method will be invoked to execute the task |
find_lhs() |
None |
get_command() |
TODO ADD bound variables to the resolver |
get_config() |
Returns the task config as a serializable dictionary |
get_container() |
Returns the container definition (if any) that is used to run the task on hosted Flyte |
get_custom() |
Return additional plugin-specific custom data (if any) as a serializable dictionary |
get_extended_resources() |
Returns the extended resources to allocate to the task on hosted Flyte |
get_input_types() |
Returns the names and python types as a dictionary for the inputs of this task |
get_k8s_pod() |
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte |
get_sql() |
Returns the Sql definition (if any) that is used to run the task on hosted Flyte |
get_type_for_input_var() |
Returns the python type for an input variable by name |
get_type_for_output_var() |
We override this method from flytekit |
local_execute() |
This function is used only in the local execution path and is responsible for calling dispatch execute |
local_execution_mode() |
None |
post_execute() |
Post execute is called after the execution has completed, with the user_params and can be used to clean-up, |
pre_execute() |
This is the method that will be invoked directly before executing the task method and before all the inputs |
prepare_target() |
Alters the underlying run_task command to modify it for map task execution and then resets it after |
sandbox_execute() |
Call dispatch_execute, in the context of a local sandbox execution |
set_command_prefix() |
None |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Generates a node that encapsulates this task in a workflow definition.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
This returns metadata for the parent ArrayNode, not the sub-node getting mapped over
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte's type-system-based input values and invokes the actual call to the executor. This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
execute()
This method will be invoked to execute the task.
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_command()
def get_command(
settings: flytekit.configuration.SerializationSettings,
):
TODO ADD bound variables to the resolver. Maybe we need a different resolver?
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_config()
def get_config(
settings: flytekit.configuration.SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom configuration defined for this task.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_container()
def get_container(
settings: flytekit.configuration.SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: flytekit.configuration.SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: flytekit.configuration.SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
We override this method from flytekit.core.base_task Task because the dispatch_execute method uses this
interface to construct outputs. Each instance of a container_array task will, however, produce outputs
according to the underlying run_task interface, and the array plugin handler will actually create a collection
from these individual outputs as the final output value.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
This function is used only in the local execution path and is responsible for calling dispatch execute.
Use this function when calling a task with native values (or Promises containing Flyte literals derived from
Python native values).
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up or alter the outputs to match the intended task's outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are already correctly
set up before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
prepare_target()
Alters the underlying run_task command to modify it for map task execution and then resets it after.
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
set_command_prefix()
def set_command_prefix(
cmd: typing.Optional[typing.List[str]],
):
Parameter |
Type |
cmd |
typing.Optional[typing.List[str]] |
Properties
Property |
Type |
Description |
bound_inputs |
|
|
concurrency |
|
|
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
execution_mode |
|
|
instantiated_in |
|
|
interface |
|
|
is_original_sub_node_interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
min_success_ratio |
|
|
min_successes |
|
|
name |
|
|
python_function_task |
|
|
python_interface |
|
|
security_context |
|
|
task_config |
|
|
task_type |
|
|
task_type_version |
|
|
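In user code an ArrayNodeMapTask is usually created indirectly through flytekit's map_task helper rather than instantiated by hand. A minimal sketch, assuming a recent flytekit where map_task produces an array-node map task (task and workflow names are illustrative):
.. code-block:: python

    from typing import List

    from flytekit import task, workflow, map_task

    @task
    def square(x: int) -> int:
        return x * x

    @workflow
    def squares(xs: List[int]) -> List[int]:
        # concurrency and min_success_ratio map onto the parameters documented above
        return map_task(square, concurrency=4, min_success_ratio=0.9)(x=xs)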
def ArrayNodeModel(
node: Node,
parallelism,
min_successes,
min_success_ratio,
execution_mode,
is_original_sub_node_interface,
data_mode,
bound_inputs,
):
TODO: docstring
Parameter |
Type |
node |
Node |
parallelism |
|
min_successes |
|
min_success_ratio |
|
execution_mode |
|
is_original_sub_node_interface |
|
data_mode |
|
bound_inputs |
|
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object,
):
Parameter |
Type |
pb2_object |
|
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
is_empty |
|
|
node |
|
|
def BranchNode(
name: str,
ifelse_block: _core_wf.IfElseBlock,
):
Parameter |
Type |
name |
str |
ifelse_block |
_core_wf.IfElseBlock |
Properties
Property |
Type |
Description |
name |
|
|
def BranchNodeModel(
if_else: flytekit.models.core.workflow.IfElseBlock,
):
BranchNode is a special node that alters the flow of the workflow graph. It allows the control flow to branch at
runtime based on a series of conditions that get evaluated on various parameters (e.g. inputs, primitives).
Parameter |
Type |
if_else |
flytekit.models.core.workflow.IfElseBlock |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_objct,
):
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
if_else |
|
|
is_empty |
|
|
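In practice branch nodes are produced by flytekit's conditional construct rather than assembled from these models directly. A minimal sketch, with illustrative task and workflow names:
.. code-block:: python

    from flytekit import task, workflow, conditional

    @task
    def double(x: int) -> int:
        return 2 * x

    @task
    def negate(x: int) -> int:
        return -x

    @workflow
    def branching(x: int) -> int:
        # At compile time this produces a BranchNode wrapping an IfElseBlock.
        return (
            conditional("sign_check")
            .if_(x > 0)
            .then(double(x=x))
            .else_()
            .then(negate(x=x))
        )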
Abstract class for class decorators.
Configuration can be attached to the decorator class and used at a higher level.
def ClassDecorator(
task_function,
kwargs,
):
If the decorator is called with arguments, func will be None.
If the decorator is called without arguments, func will be the function to be decorated.
Parameter |
Type |
task_function |
|
kwargs |
**kwargs |
Methods
Method |
Description |
execute() |
This method will be called when the decorated function is called |
get_extra_config() |
Get the config of the decorator |
execute()
def execute(
args,
kwargs,
):
This method will be called when the decorated function is called.
Parameter |
Type |
args |
*args |
kwargs |
**kwargs |
get_extra_config()
Get the config of the decorator.
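A minimal sketch of a subclass, assuming ClassDecorator is importable from flytekit.core.utils in your version; the decorator name and config key are purely illustrative:
.. code-block:: python

    from flytekit.core.utils import ClassDecorator

    class my_decorator(ClassDecorator):
        # Illustrative decorator that records a flag and wraps the task function.

        def __init__(self, task_function=None, enabled: bool = True, **kwargs):
            self.enabled = enabled
            # The base class handles both the with-arguments and without-arguments cases.
            super().__init__(task_function, enabled=enabled, **kwargs)

        def execute(self, *args, **kwargs):
            # Called when the decorated function is invoked.
            return self.task_function(*args, **kwargs)

        def get_extra_config(self):
            # Config surfaced to higher layers, e.g. at serialization time.
            return {"my_decorator_enabled": str(self.enabled)}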
This is an intermediate class that represents Flyte Tasks that run a container at execution time. This is the vast
majority of tasks - the typical @task
decorated tasks for instance all run a container. An example of
something that doesn’t run a container would be something like the Athena SQL task.
def ContainerTask(
name: str,
image: typing.Union[str, flytekit.image_spec.image_spec.ImageSpec],
command: typing.List[str],
inputs: typing.Optional[typing.OrderedDict[str, typing.Type]],
metadata: typing.Optional[flytekit.core.base_task.TaskMetadata],
arguments: typing.Optional[typing.List[str]],
outputs: typing.Optional[typing.Dict[str, typing.Type]],
requests: typing.Optional[flytekit.core.resources.Resources],
limits: typing.Optional[flytekit.core.resources.Resources],
input_data_dir: typing.Optional[str],
output_data_dir: typing.Optional[str],
metadata_format: <enum 'MetadataFormat'>,
io_strategy: typing.Optional[flytekit.core.container_task.ContainerTask.IOStrategy],
secret_requests: typing.Optional[typing.List[flytekit.models.security.Secret]],
pod_template: typing.Optional[ForwardRef('PodTemplate')],
pod_template_name: typing.Optional[str],
local_logs: bool,
resources: typing.Optional[flytekit.core.resources.Resources],
kwargs,
):
Parameter |
Type |
name |
str |
image |
typing.Union[str, flytekit.image_spec.image_spec.ImageSpec] |
command |
typing.List[str] |
inputs |
typing.Optional[typing.OrderedDict[str, typing.Type]] |
metadata |
typing.Optional[flytekit.core.base_task.TaskMetadata] |
arguments |
typing.Optional[typing.List[str]] |
outputs |
typing.Optional[typing.Dict[str, typing.Type]] |
requests |
typing.Optional[flytekit.core.resources.Resources] |
limits |
typing.Optional[flytekit.core.resources.Resources] |
input_data_dir |
typing.Optional[str] |
output_data_dir |
typing.Optional[str] |
metadata_format |
<enum 'MetadataFormat'> |
io_strategy |
typing.Optional[flytekit.core.container_task.ContainerTask.IOStrategy] |
secret_requests |
typing.Optional[typing.List[flytekit.models.security.Secret]] |
pod_template |
typing.Optional[ForwardRef('PodTemplate')] |
pod_template_name |
typing.Optional[str] |
local_logs |
bool |
resources |
typing.Optional[flytekit.core.resources.Resources] |
kwargs |
**kwargs |
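A minimal sketch of a raw container task; the image and shell command are illustrative, and Flyte materializes inputs under input_data_dir and reads outputs back from output_data_dir:
.. code-block:: python

    from flytekit import ContainerTask, kwtypes, workflow

    multiply = ContainerTask(
        name="multiply",
        image="alpine:3.18",               # illustrative image
        input_data_dir="/var/inputs",
        output_data_dir="/var/outputs",
        inputs=kwtypes(a=int, b=int),
        outputs=kwtypes(product=int),
        command=[
            "sh", "-c",
            # {{.inputs.a}} and {{.inputs.b}} are templated in by Flyte at runtime.
            "echo $(( {{.inputs.a}} * {{.inputs.b}} )) > /var/outputs/product",
        ],
    )

    @workflow
    def wf(a: int = 3, b: int = 4) -> int:
        return multiply(a=a, b=b)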
Methods
Method |
Description |
compile() |
Generates a node that encapsulates this task in a workflow definition |
construct_node_metadata() |
Used when constructing the node that encapsulates this task as part of a broader workflow definition |
dispatch_execute() |
This method translates Flyte’s Type system based input values and invokes the actual call to the executor |
execute() |
This method will be invoked to execute the task |
find_lhs() |
None |
get_config() |
Returns the task config as a serializable dictionary |
get_container() |
Returns the container definition (if any) that is used to run the task on hosted Flyte |
get_custom() |
Return additional plugin-specific custom data (if any) as a serializable dictionary |
get_extended_resources() |
Returns the extended resources to allocate to the task on hosted Flyte |
get_input_types() |
Returns the names and python types as a dictionary for the inputs of this task |
get_k8s_pod() |
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte |
get_sql() |
Returns the Sql definition (if any) that is used to run the task on hosted Flyte |
get_type_for_input_var() |
Returns the python type for an input variable by name |
get_type_for_output_var() |
Returns the python type for the specified output variable by name |
local_execute() |
This function is used only in the local execution path and is responsible for calling dispatch execute |
local_execution_mode() |
None |
post_execute() |
Post execute is called after the execution has completed, with the user_params and can be used to clean-up, |
pre_execute() |
This is the method that will be invoked directly before executing the task method and before all the inputs |
sandbox_execute() |
Call dispatch_execute, in the context of a local sandbox execution |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Generates a node that encapsulates this task in a workflow definition.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
Used when constructing the node that encapsulates this task as part of a broader workflow definition.
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte's type-system-based input values and invokes the actual call to the executor. This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
execute()
This method will be invoked to execute the task.
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_config()
def get_config(
settings: flytekit.configuration.SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom configuration defined for this task.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_container()
def get_container(
settings: flytekit.configuration.SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: flytekit.configuration.SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: flytekit.configuration.SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
Returns the python type for the specified output variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
This function is used only in the local execution path and is responsible for calling dispatch execute.
Use this function when calling a task with native values (or Promises containing Flyte literals derived from
Python native values).
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up or alter the outputs to match the intended task's outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are already correctly
set up before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
Properties
Property |
Type |
Description |
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
name |
|
|
python_interface |
|
|
resources |
|
|
security_context |
|
|
task_config |
|
|
task_type |
|
|
task_type_version |
|
|
This is the base eager task (aka eager workflow) type. It replaces the previous experimental eager task type circa
Q4 2024. Users unfamiliar with this concept should refer to the documentation for more information.
But basically, Python becomes propeller, and every task invocation creates a stack frame on the Flyte cluster in
the form of an execution, rather than a frame on the actual memory stack.
def EagerAsyncPythonFunctionTask(
task_config: T,
task_function: Callable,
task_type,
ignore_input_vars: Optional[List[str]],
task_resolver: Optional[TaskResolverMixin],
node_dependency_hints: Optional[Iterable[Union['PythonFunctionTask', '_annotated_launch_plan.LaunchPlan', WorkflowBase]]],
enable_deck: bool,
kwargs,
):
Parameter |
Type |
task_config |
T |
task_function |
Callable |
task_type |
|
ignore_input_vars |
Optional[List[str]] |
task_resolver |
Optional[TaskResolverMixin] |
node_dependency_hints |
Optional[Iterable[Union['PythonFunctionTask', '_annotated_launch_plan.LaunchPlan', WorkflowBase]]] |
enable_deck |
bool |
kwargs |
**kwargs |
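A minimal sketch of the concept, assuming a flytekit version where the eager decorator (which produces this task type) is importable from the top-level flytekit package; names are illustrative:
.. code-block:: python

    from flytekit import task, eager

    @task
    def add_one(x: int) -> int:
        return x + 1

    @eager
    async def eager_wf(x: int) -> int:
        # Each awaited call becomes its own execution on the Flyte cluster
        # (the "stack frame" described above) rather than a local function call.
        out = await add_one(x=x)
        if out > 10:
            return out
        return await add_one(x=out)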
Methods
Method |
Description |
async_execute() |
Overrides the base execute function |
compile() |
Generates a node that encapsulates this task in a workflow definition |
compile_into_workflow() |
In the case of dynamic workflows, this function will produce a workflow definition at execution time which will |
construct_node_metadata() |
Used when constructing the node that encapsulates this task as part of a broader workflow definition |
dispatch_execute() |
This method translates Flyte’s Type system based input values and invokes the actual call to the executor |
dynamic_execute() |
By the time this function is invoked, the local_execute function should have unwrapped the Promises and Flyte |
execute() |
Overrides the base execute function |
find_lhs() |
None |
get_as_workflow() |
None |
get_command() |
Returns the command which should be used in the container definition for the serialized version of this task |
get_config() |
Returns the task config as a serializable dictionary |
get_container() |
Returns the container definition (if any) that is used to run the task on hosted Flyte |
get_custom() |
Return additional plugin-specific custom data (if any) as a serializable dictionary |
get_default_command() |
Returns the default pyflyte-execute command used to run this on hosted Flyte platforms |
get_extended_resources() |
Returns the extended resources to allocate to the task on hosted Flyte |
get_image() |
Update image spec based on fast registration usage, and return string representing the image |
get_input_types() |
Returns the names and python types as a dictionary for the inputs of this task |
get_k8s_pod() |
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte |
get_sql() |
Returns the Sql definition (if any) that is used to run the task on hosted Flyte |
get_type_for_input_var() |
Returns the python type for an input variable by name |
get_type_for_output_var() |
Returns the python type for the specified output variable by name |
local_execute() |
This function is used only in the local execution path and is responsible for calling dispatch execute |
local_execution_mode() |
None |
post_execute() |
Post execute is called after the execution has completed, with the user_params and can be used to clean-up, |
pre_execute() |
This is the method that will be invoked directly before executing the task method and before all the inputs |
reset_command_fn() |
Resets the command which should be used in the container definition of this task to the default arguments |
run() |
This is a helper function to help run eager parent tasks locally, pointing to a remote cluster |
run_with_backend() |
This is the main entry point to kick off a live run |
sandbox_execute() |
Call dispatch_execute, in the context of a local sandbox execution |
set_command_fn() |
By default, the task will run on the Flyte platform using the pyflyte-execute command |
set_resolver() |
By default, flytekit uses the DefaultTaskResolver to resolve the task |
async_execute()
def async_execute(
args,
kwargs,
):
Overrides the base execute function. This function does not handle dynamic at all. Eager and dynamic don’t mix.
Some notes on the different call scenarios since it’s a little different than other tasks.
a) starting local execution - eager_task()
-> last condition of call handler,
-> set execution mode and self.local_execute()
-> self.execute(native_vals)
-> 1) -> task function() or 2) -> self.run_with_backend() # fn name will be changed.
b) inside an eager task local execution - calling normal_task()
-> call handler detects in eager local execution (middle part of call handler)
-> call normal_task’s local_execute()
c) inside an eager task local execution - calling async_normal_task()
-> produces a coro, which when awaited/run
-> call handler detects in eager local execution (middle part of call handler)
-> call async_normal_task’s local_execute()
-> call AsyncPythonFunctionTask’s async_execute(), which awaits the task function
d) inside an eager task local execution - calling another_eager_task()
-> produces a coro, which when awaited/run
-> call handler detects in eager local execution (middle part of call handler)
-> call another_eager_task’s local_execute()
-> results are returned instead of being passed to create_native_named_tuple
e) eager_task, starting backend execution from entrypoint.py
-> eager_task.dispatch_execute(literals)
-> eager_task.execute(native values)
-> awaits eager_task.run_with_backend() # fn name will be changed
f) in an eager task during backend execution, calling any flyte_entity()
-> add the entity to the worker queue and await the result.
Parameter |
Type |
args |
*args |
kwargs |
**kwargs |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Generates a node that encapsulates this task in a workflow definition.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
compile_into_workflow()
def compile_into_workflow(
ctx: FlyteContext,
task_function: Callable,
kwargs,
):
In the case of dynamic workflows, this function will produce a workflow definition at execution time which will
then proceed to be executed.
Parameter |
Type |
ctx |
FlyteContext |
task_function |
Callable |
kwargs |
**kwargs |
def construct_node_metadata()
Used when constructing the node that encapsulates this task as part of a broader workflow definition.
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte's type-system-based input values and invokes the actual call to the executor. This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
dynamic_execute()
def dynamic_execute(
task_function: Callable,
kwargs,
):
By the time this function is invoked, the local_execute function should have unwrapped the Promises and Flyte
literal wrappers so that the kwargs we are working with here are now Python native literal values. This
function is also expected to return Python native literal values.
Since the user code within a dynamic task constitutes a workflow, we have to first compile the workflow, and
then execute that workflow.
When running for real in production, the task would stop after the compilation step, and then create a file
representing that newly generated workflow, instead of executing it.
Parameter |
Type |
task_function |
Callable |
kwargs |
**kwargs |
execute()
Overrides the base execute function. This function does not handle dynamic at all. Eager and dynamic don’t mix.
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_as_workflow()
get_command()
def get_command(
settings: SerializationSettings,
):
Returns the command which should be used in the container definition for the serialized version of this task
registered on a hosted Flyte platform.
Parameter |
Type |
settings |
SerializationSettings |
get_config()
def get_config(
settings: SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom configuration defined for this task.
Parameter |
Type |
settings |
SerializationSettings |
get_container()
def get_container(
settings: SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_default_command()
def get_default_command(
settings: SerializationSettings,
):
Returns the default pyflyte-execute command used to run this on hosted Flyte platforms.
Parameter |
Type |
settings |
SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_image()
def get_image(
settings: SerializationSettings,
):
Updates the image spec based on fast registration usage, and returns a string representing the image.
Parameter |
Type |
settings |
SerializationSettings |
get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
Returns the python type for the specified output variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
This function is used only in the local execution path and is responsible for calling dispatch execute.
Use this function when calling a task with native values (or Promises containing Flyte literals derived from
Python native values).
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up or alter the outputs to match the intended task's outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are already correctly
set up before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
reset_command_fn()
Resets the command which should be used in the container definition of this task to the default arguments.
This is useful when the command line is overridden at serialization time.
run()
def run(
remote: 'FlyteRemote',
ss: SerializationSettings,
kwargs,
):
This is a helper function to help run eager parent tasks locally, pointing to a remote cluster. This is used
only for local testing for now.
Parameter |
Type |
remote |
'FlyteRemote' |
ss |
SerializationSettings |
kwargs |
**kwargs |
run_with_backend()
def run_with_backend(
kwargs,
):
This is the main entry point to kick off a live run, for example when you're running locally but want to use a
Flyte backend, or when running for real on a Flyte backend.
Parameter |
Type |
kwargs |
**kwargs |
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
set_command_fn()
def set_command_fn(
get_command_fn: Optional[Callable[[SerializationSettings], List[str]]],
):
By default, the task will run on the Flyte platform using the pyflyte-execute command.
However, it can be useful to update the command with which the task is serialized for specific cases like
running map tasks (“pyflyte-map-execute”) or for fast-executed tasks.
Parameter |
Type |
get_command_fn |
Optional[Callable[[SerializationSettings], List[str]]] |
set_resolver()
def set_resolver(
resolver: TaskResolverMixin,
):
By default, flytekit uses the DefaultTaskResolver to resolve the task. This method allows the user to set a custom
task resolver. It can be useful to override the task resolver for specific cases like running tasks in the jupyter notebook.
Parameter |
Type |
resolver |
TaskResolverMixin |
Properties
Property |
Type |
Description |
container_image |
|
|
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
execution_mode |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
name |
|
|
node_dependency_hints |
|
|
python_interface |
|
|
resources |
|
|
security_context |
|
|
task_config |
|
|
task_function |
|
|
task_resolver |
|
|
task_type |
|
|
task_type_version |
|
|
A node type that waits for user input before proceeding with a workflow.
A gate is a type of node that behaves like a task, but instead of running code, it either needs to wait
for user input to proceed or wait for a timer to complete running.
def Gate(
name: str,
input_type: typing.Optional[typing.Type],
upstream_item: typing.Optional[typing.Any],
sleep_duration: typing.Optional[datetime.timedelta],
timeout: typing.Optional[datetime.timedelta],
):
Parameter |
Type |
name |
str |
input_type |
typing.Optional[typing.Type] |
upstream_item |
typing.Optional[typing.Any] |
sleep_duration |
typing.Optional[datetime.timedelta] |
timeout |
typing.Optional[datetime.timedelta] |
Methods
def construct_node_metadata()
local_execute()
def local_execute(
ctx: FlyteContext,
kwargs,
):
Parameter |
Type |
ctx |
FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
Properties
Property |
Type |
Description |
input_type |
|
|
literal_type |
|
|
name |
|
|
python_interface |
|
|
sleep_duration |
|
|
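Gate nodes are normally created via flytekit's approve, wait_for_input, and sleep helpers rather than by constructing Gate directly. A minimal sketch using the sleep and approve variants, with illustrative names:
.. code-block:: python

    from datetime import timedelta

    from flytekit import task, workflow, approve, sleep

    @task
    def build_report(x: int) -> str:
        return f"report-{x}"

    @workflow
    def report_wf(x: int) -> str:
        report = build_report(x=x)
        # Timer gate: pause the workflow for a fixed duration.
        sleep(timedelta(minutes=5))
        # Approval gate: block until a human approves the upstream output (or the gate times out).
        return approve(report, "approve-report", timeout=timedelta(hours=2))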
def GateNode(
signal: typing.Optional[flytekit.models.core.workflow.SignalCondition],
sleep: typing.Optional[flytekit.models.core.workflow.SleepCondition],
approve: typing.Optional[flytekit.models.core.workflow.ApproveCondition],
):
Parameter |
Type |
signal |
typing.Optional[flytekit.models.core.workflow.SignalCondition] |
sleep |
typing.Optional[flytekit.models.core.workflow.SleepCondition] |
approve |
typing.Optional[flytekit.models.core.workflow.ApproveCondition] |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object: flyteidl.core.workflow_pb2.GateNode,
):
Parameter |
Type |
pb2_object |
flyteidl.core.workflow_pb2.GateNode |
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
approve |
|
|
condition |
|
|
is_empty |
|
|
signal |
|
|
sleep |
|
|
Image is a structured wrapper for task container images used in object serialization.
Attributes:
name (str): A user-provided name to identify this image.
fqn (str): Fully qualified image name. This consists of:
1. a registry location
2. a username
3. a repository name
For example: hostname/username/reponame
tag (str): Optional tag used to specify which version of an image to pull
digest (str): Optional digest used to specify which version of an image to pull
def Image(
name: str,
fqn: str,
tag: Optional[str],
digest: Optional[str],
):
Parameter |
Type |
name |
str |
fqn |
str |
tag |
Optional[str] |
digest |
Optional[str] |
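A minimal sketch of constructing an Image directly; the registry, repository, and tag are placeholders:
.. code-block:: python

    from flytekit.configuration import Image

    img = Image(name="default", fqn="ghcr.io/my-org/my-repo", tag="v1.0.0")
    print(img.full)     # "ghcr.io/my-org/my-repo:v1.0.0"
    print(img.version)  # the tag (or digest) portion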
Methods
from_dict()
def from_dict(
kvs: typing.Union[dict, list, str, int, float, bool, NoneType],
infer_missing,
):
Parameter |
Type |
kvs |
typing.Union[dict, list, str, int, float, bool, NoneType] |
infer_missing |
|
from_json()
def from_json(
s: typing.Union[str, bytes, bytearray],
parse_float,
parse_int,
parse_constant,
infer_missing,
kw,
):
Parameter |
Type |
s |
typing.Union[str, bytes, bytearray] |
parse_float |
|
parse_int |
|
parse_constant |
|
infer_missing |
|
kw |
|
look_up_image_info()
def look_up_image_info(
name: str,
image_identifier: str,
allow_no_tag_or_digest: bool,
):
Creates an Image
object from an image identifier string or a path to an ImageSpec yaml file.
This function is used when registering tasks/workflows with Admin. When using
the canonical Python-based development cycle, the version that is used to
register workflows and tasks with Admin should be the version of the image
itself, which should ideally be something unique like the git revision SHA1 of
the latest commit.
Parameter |
Type |
name |
str |
image_identifier |
str |
allow_no_tag_or_digest |
bool |
schema()
def schema(
infer_missing: bool,
only,
exclude,
many: bool,
context,
load_only,
dump_only,
partial: bool,
unknown,
):
Parameter |
Type |
infer_missing |
bool |
only |
|
exclude |
|
many |
bool |
context |
|
load_only |
|
dump_only |
|
partial |
bool |
unknown |
|
to_dict()
def to_dict(
encode_json,
):
Parameter |
Type |
encode_json |
|
to_json()
def to_json(
skipkeys: bool,
ensure_ascii: bool,
check_circular: bool,
allow_nan: bool,
indent: typing.Union[int, str, NoneType],
separators: typing.Tuple[str, str],
default: typing.Callable,
sort_keys: bool,
kw,
):
Parameter |
Type |
skipkeys |
bool |
ensure_ascii |
bool |
check_circular |
bool |
allow_nan |
bool |
indent |
typing.Union[int, str, NoneType] |
separators |
typing.Tuple[str, str] |
default |
typing.Callable |
sort_keys |
bool |
kw |
|
Properties
Property |
Type |
Description |
full |
|
|
version |
|
|
We recommend using ImageConfig.auto(img_name=None) to create an ImageConfig.
For example, ImageConfig.auto(img_name="ghcr.io/flyteorg/flytecookbook:v1.0.0") will create an ImageConfig.
ImageConfig holds available images which can be used at registration time. A default image can be specified
along with optional additional images. Each image in the config must have a unique name.
Attributes:
default_image (Optional[Image]): The default image to be used as a container for task serialization.
images (List[Image]): Optional, additional images which can be used in task container definitions.
def ImageConfig(
default_image: Optional[Image],
images: Optional[List[Image]],
):
Parameter |
Type |
default_image |
Optional[Image] |
images |
Optional[List[Image]] |
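A minimal sketch of building an ImageConfig from a single image URI (a placeholder here) and looking up the default image:
.. code-block:: python

    from flytekit.configuration import ImageConfig

    cfg = ImageConfig.auto(img_name="ghcr.io/flyteorg/flytecookbook:v1.0.0")
    default = cfg.find_image("default")  # the Image registered under the name "default"
    print(default.full if default else "no default image configured")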
Methods
auto()
def auto(
config_file: typing.Union[str, ConfigFile, None],
img_name: Optional[str],
):
Reads from a config file or from img_name.
Note that this function does not take into account the flytekit default images (see the Dockerfiles at the
base of this repo). To pick those up, see the auto_default_image function.
Parameter |
Type |
config_file |
typing.Union[str, ConfigFile, None] |
img_name |
Optional[str] |
auto_default_image()
create_from()
def create_from(
default_image: Optional[Image],
other_images: typing.Optional[typing.List[Image]],
):
Parameter |
Type |
default_image |
Optional[Image] |
other_images |
typing.Optional[typing.List[Image]] |
find_image()
Return an image, by name, if it exists.
from_dict()
def from_dict(
kvs: typing.Union[dict, list, str, int, float, bool, NoneType],
infer_missing,
):
Parameter |
Type |
kvs |
typing.Union[dict, list, str, int, float, bool, NoneType] |
infer_missing |
|
from_images()
def from_images(
default_image: str,
m: typing.Optional[typing.Dict[str, str]],
):
Allows you to programmatically create an ImageConfig. Usually only the default_image is required, unless
your workflow uses multiple images.
.. code:: python

    ImageConfig.from_images(
        "ghcr.io/flyteorg/flytecookbook:v1.0.0",
        {
            "spark": "ghcr.io/flyteorg/myspark:…",
            "other": "…",
        }
    )
Parameter |
Type |
default_image |
str |
m |
typing.Optional[typing.Dict[str, str]] |
from_json()
def from_json(
s: typing.Union[str, bytes, bytearray],
parse_float,
parse_int,
parse_constant,
infer_missing,
kw,
):
Parameter |
Type |
s |
typing.Union[str, bytes, bytearray] |
parse_float |
|
parse_int |
|
parse_constant |
|
infer_missing |
|
kw |
|
schema()
def schema(
infer_missing: bool,
only,
exclude,
many: bool,
context,
load_only,
dump_only,
partial: bool,
unknown,
):
Parameter |
Type |
infer_missing |
bool |
only |
|
exclude |
|
many |
bool |
context |
|
load_only |
|
dump_only |
|
partial |
bool |
unknown |
|
to_dict()
def to_dict(
encode_json,
):
Parameter |
Type |
encode_json |
|
to_json()
def to_json(
skipkeys: bool,
ensure_ascii: bool,
check_circular: bool,
allow_nan: bool,
indent: typing.Union[int, str, NoneType],
separators: typing.Tuple[str, str],
default: typing.Callable,
sort_keys: bool,
kw,
):
Parameter |
Type |
skipkeys |
bool |
ensure_ascii |
bool |
check_circular |
bool |
allow_nan |
bool |
indent |
typing.Union[int, str, NoneType] |
separators |
typing.Tuple[str, str] |
default |
typing.Callable |
sort_keys |
bool |
kw |
|
validate_image()
def validate_image(
_: typing.Any,
param: str,
values: tuple,
):
Validates the image to match the standard format. Also validates that only one default image
is provided. A default image is one that is specified as default=<image_uri>
or just <image_uri>. All other images should be provided with a name, in the format name=<image_uri>.
This method can be used with the CLI.
Parameter |
Type |
_ |
typing.Any |
param |
str |
values |
tuple |
This class is used to specify the docker image that will be used to run the task.
def ImageSpec(
name: str,
python_version: str,
builder: typing.Optional[str],
source_root: typing.Optional[str],
env: typing.Optional[typing.Dict[str, str]],
registry: typing.Optional[str],
packages: typing.Optional[typing.List[str]],
conda_packages: typing.Optional[typing.List[str]],
conda_channels: typing.Optional[typing.List[str]],
requirements: typing.Optional[str],
apt_packages: typing.Optional[typing.List[str]],
cuda: typing.Optional[str],
cudnn: typing.Optional[str],
base_image: typing.Union[str, ForwardRef('ImageSpec'), NoneType],
platform: str,
pip_index: typing.Optional[str],
pip_extra_index_url: typing.Optional[typing.List[str]],
pip_secret_mounts: typing.Optional[typing.List[typing.Tuple[str, str]]],
pip_extra_args: typing.Optional[str],
registry_config: typing.Optional[str],
entrypoint: typing.Optional[typing.List[str]],
commands: typing.Optional[typing.List[str]],
tag_format: typing.Optional[str],
source_copy_mode: typing.Optional[flytekit.constants.CopyFileDetection],
copy: typing.Optional[typing.List[str]],
python_exec: typing.Optional[str],
):
Parameter |
Type |
name |
str |
python_version |
str |
builder |
typing.Optional[str] |
source_root |
typing.Optional[str] |
env |
typing.Optional[typing.Dict[str, str]] |
registry |
typing.Optional[str] |
packages |
typing.Optional[typing.List[str]] |
conda_packages |
typing.Optional[typing.List[str]] |
conda_channels |
typing.Optional[typing.List[str]] |
requirements |
typing.Optional[str] |
apt_packages |
typing.Optional[typing.List[str]] |
cuda |
typing.Optional[str] |
cudnn |
typing.Optional[str] |
base_image |
typing.Union[str, ForwardRef('ImageSpec'), NoneType] |
platform |
str |
pip_index |
typing.Optional[str] |
pip_extra_index_url |
typing.Optional[typing.List[str]] |
pip_secret_mounts |
typing.Optional[typing.List[typing.Tuple[str, str]]] |
pip_extra_args |
typing.Optional[str] |
registry_config |
typing.Optional[str] |
entrypoint |
typing.Optional[typing.List[str]] |
commands |
typing.Optional[typing.List[str]] |
tag_format |
typing.Optional[str] |
source_copy_mode |
typing.Optional[flytekit.constants.CopyFileDetection] |
copy |
typing.Optional[typing.List[str]] |
python_exec |
typing.Optional[str] |
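A minimal sketch of declaring an ImageSpec and attaching it to a task; the registry and package list are placeholders:
.. code-block:: python

    from flytekit import task
    from flytekit.image_spec import ImageSpec

    sklearn_image = ImageSpec(
        name="sklearn-image",
        python_version="3.11",
        packages=["scikit-learn"],
        registry="ghcr.io/my-org",  # where the built image will be pushed
    )

    @task(container_image=sklearn_image)
    def train() -> float:
        from sklearn.linear_model import LinearRegression  # available inside the built image
        return 0.0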
Methods
Method |
Description |
exist() |
Check if the image exists in the registry |
force_push() |
Builder that returns a new image spec with force push enabled |
from_env() |
Create ImageSpec with the environment’s Python version and packages pinned to the ones in the environment |
image_name() |
Full image name with tag |
is_container() |
Check if the current container image in the pod is built from current image spec |
with_apt_packages() |
Builder that returns a new image spec with an additional list of apt packages that will be executed during the building process |
with_commands() |
Builder that returns a new image spec with an additional list of commands that will be executed during the building process |
with_copy() |
Builder that returns a new image spec with the source files copied to the destination directory |
with_packages() |
Builder that returns a new image spec with additional python packages that will be installed during the building process |
exist()
Check if the image exists in the registry.
Return True if the image exists in the registry, False otherwise.
Return None if failed to check if the image exists due to the permission issue or other reasons.
force_push()
Builder that returns a new image spec with force push enabled.
from_env()
def from_env(
pinned_packages: typing.Optional[typing.List[str]],
kwargs,
):
Create ImageSpec with the environment’s Python version and packages pinned to the ones in the environment.
Parameter |
Type |
pinned_packages |
typing.Optional[typing.List[str]] |
kwargs |
**kwargs |
image_name()
Full image name with tag.
is_container()
Check if the current container image in the pod is built from current image spec.
:return: True if the current container image in the pod is built from current image spec, False otherwise.
with_apt_packages()
def with_apt_packages(
apt_packages: typing.Union[str, typing.List[str]],
):
Builder that returns a new image spec with an additional list of apt packages that will be executed during the building process.
Parameter |
Type |
apt_packages |
typing.Union[str, typing.List[str]] |
with_commands()
def with_commands(
commands: typing.Union[str, typing.List[str]],
):
Builder that returns a new image spec with an additional list of commands that will be executed during the building process.
Parameter |
Type |
commands |
typing.Union[str, typing.List[str]] |
with_copy()
def with_copy(
src: typing.Union[str, typing.List[str]],
):
Builder that returns a new image spec with the source files copied to the destination directory.
Parameter |
Type |
src |
typing.Union[str, typing.List[str]] |
with_packages()
def with_packages(
packages: typing.Union[str, typing.List[str]],
):
Builder that returns a new image spec with additional python packages that will be installed during the building process.
Parameter |
Type |
packages |
typing.Union[str, typing.List[str]] |
Properties
Property |
Type |
Description |
tag |
|
|
Launch Plans are one of the core constructs of Flyte. Please take a look at the discussion in the
:std:ref:core concepts <flyte:divedeep-launchplans>
if you are unfamiliar with them.
Every workflow is registered with a default launch plan, which is just a launch plan with none of the additional
attributes set - no default values, fixed values, schedules, etc. Assuming you have the following workflow
.. code-block:: python
@workflow
def wf(a: int, c: str) -> str:
…
Create the default launch plan with
.. code-block:: python
LaunchPlan.get_or_create(workflow=my_wf)
If you specify additional parameters, you’ll also have to give the launch plan a unique name. Default and
fixed inputs can be expressed as Python native values like so:
.. literalinclude:: ../../../tests/flytekit/unit/core/test_launch_plan.py
:start-after: # fixed_and_default_start
:end-before: # fixed_and_default_end
:language: python
:dedent: 4
Additionally, a launch plan can be configured to run on a schedule and emit notifications.
Please see the relevant Schedule and Notification objects as well.
To configure the remaining parameters, you’ll need to import the relevant model objects as well.
.. literalinclude:: ../../../tests/flytekit/unit/core/test_launch_plan.py
:start-after: # schedule_start
:end-before: # schedule_end
:language: python
:dedent: 4
.. code-block:: python
from flytekit.models.common import Annotations, AuthRole, Labels, RawOutputDataConfig
Then use as follows
.. literalinclude:: ../../../tests/flytekit/unit/core/test_launch_plan.py
:start-after: # auth_role_start
:end-before: # auth_role_end
:language: python
:dedent: 4
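A minimal sketch tying this together, assuming the wf workflow from the snippet above; the launch plan name and cron expression are illustrative:
.. code-block:: python

    from flytekit import LaunchPlan, CronSchedule

    daily_lp = LaunchPlan.get_or_create(
        workflow=wf,
        name="wf_daily",
        default_inputs={"a": 3},
        fixed_inputs={"c": "hello"},
        schedule=CronSchedule(schedule="0 8 * * *"),  # every day at 08:00
    )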
def LaunchPlan(
name: str,
workflow: _annotated_workflow.WorkflowBase,
parameters: _interface_models.ParameterMap,
fixed_inputs: _literal_models.LiteralMap,
schedule: Optional[_schedule_model.Schedule],
notifications: Optional[List[_common_models.Notification]],
labels: Optional[_common_models.Labels],
annotations: Optional[_common_models.Annotations],
raw_output_data_config: Optional[_common_models.RawOutputDataConfig],
max_parallelism: Optional[int],
security_context: Optional[security.SecurityContext],
trigger: Optional[LaunchPlanTriggerBase],
overwrite_cache: Optional[bool],
auto_activate: bool,
):
Parameter |
Type |
name |
str |
workflow |
_annotated_workflow.WorkflowBase |
parameters |
_interface_models.ParameterMap |
fixed_inputs |
_literal_models.LiteralMap |
schedule |
Optional[_schedule_model.Schedule] |
notifications |
Optional[List[_common_models.Notification]] |
labels |
Optional[_common_models.Labels] |
annotations |
Optional[_common_models.Annotations] |
raw_output_data_config |
Optional[_common_models.RawOutputDataConfig] |
max_parallelism |
Optional[int] |
security_context |
Optional[security.SecurityContext] |
trigger |
Optional[LaunchPlanTriggerBase] |
overwrite_cache |
Optional[bool] |
auto_activate |
bool |
Methods
clone_with()
def clone_with(
name: str,
parameters: Optional[_interface_models.ParameterMap],
fixed_inputs: Optional[_literal_models.LiteralMap],
schedule: Optional[_schedule_model.Schedule],
notifications: Optional[List[_common_models.Notification]],
labels: Optional[_common_models.Labels],
annotations: Optional[_common_models.Annotations],
raw_output_data_config: Optional[_common_models.RawOutputDataConfig],
max_parallelism: Optional[int],
security_context: Optional[security.SecurityContext],
trigger: Optional[LaunchPlanTriggerBase],
overwrite_cache: Optional[bool],
auto_activate: bool,
):
Parameter |
Type |
name |
str |
parameters |
Optional[_interface_models.ParameterMap] |
fixed_inputs |
Optional[_literal_models.LiteralMap] |
schedule |
Optional[_schedule_model.Schedule] |
notifications |
Optional[List[_common_models.Notification]] |
labels |
Optional[_common_models.Labels] |
annotations |
Optional[_common_models.Annotations] |
raw_output_data_config |
Optional[_common_models.RawOutputDataConfig] |
max_parallelism |
Optional[int] |
security_context |
Optional[security.SecurityContext] |
trigger |
Optional[LaunchPlanTriggerBase] |
overwrite_cache |
Optional[bool] |
auto_activate |
bool |
def construct_node_metadata()
create()
def create(
name: str,
workflow: _annotated_workflow.WorkflowBase,
default_inputs: Optional[Dict[str, Any]],
fixed_inputs: Optional[Dict[str, Any]],
schedule: Optional[_schedule_model.Schedule],
notifications: Optional[List[_common_models.Notification]],
labels: Optional[_common_models.Labels],
annotations: Optional[_common_models.Annotations],
raw_output_data_config: Optional[_common_models.RawOutputDataConfig],
max_parallelism: Optional[int],
security_context: Optional[security.SecurityContext],
auth_role: Optional[_common_models.AuthRole],
trigger: Optional[LaunchPlanTriggerBase],
overwrite_cache: Optional[bool],
auto_activate: bool,
):
Parameter |
Type |
name |
str |
workflow |
_annotated_workflow.WorkflowBase |
default_inputs |
Optional[Dict[str, Any]] |
fixed_inputs |
Optional[Dict[str, Any]] |
schedule |
Optional[_schedule_model.Schedule] |
notifications |
Optional[List[_common_models.Notification]] |
labels |
Optional[_common_models.Labels] |
annotations |
Optional[_common_models.Annotations] |
raw_output_data_config |
Optional[_common_models.RawOutputDataConfig] |
max_parallelism |
Optional[int] |
security_context |
Optional[security.SecurityContext] |
auth_role |
Optional[_common_models.AuthRole] |
trigger |
Optional[LaunchPlanTriggerBase] |
overwrite_cache |
Optional[bool] |
auto_activate |
bool |
get_default_launch_plan()
def get_default_launch_plan(
ctx: FlyteContext,
workflow: _annotated_workflow.WorkflowBase,
):
Users should probably call the get_or_create function defined below instead. A default launch plan is the one
that will just pick up whatever default values are defined in the workflow function signature (if any) and
use the default auth information supplied during serialization, with no notifications or schedules.
Parameter |
Type |
ctx |
FlyteContext |
workflow |
_annotated_workflow.WorkflowBase |
get_or_create()
def get_or_create(
workflow: _annotated_workflow.WorkflowBase,
name: Optional[str],
default_inputs: Optional[Dict[str, Any]],
fixed_inputs: Optional[Dict[str, Any]],
schedule: Optional[_schedule_model.Schedule],
notifications: Optional[List[_common_models.Notification]],
labels: Optional[_common_models.Labels],
annotations: Optional[_common_models.Annotations],
raw_output_data_config: Optional[_common_models.RawOutputDataConfig],
max_parallelism: Optional[int],
security_context: Optional[security.SecurityContext],
auth_role: Optional[_common_models.AuthRole],
trigger: Optional[LaunchPlanTriggerBase],
overwrite_cache: Optional[bool],
auto_activate: bool,
):
This function offers a friendlier interface for creating launch plans. If the name for the launch plan is not
supplied, this assumes you are looking for the default launch plan for the workflow. If a name is specified, it
will be used. If creating the default launch plan, none of the other arguments may be specified.
The resulting launch plan is also cached; if this is called again with the same name, the cached version is
returned. A usage sketch follows the parameter table below.
Parameter |
Type |
workflow |
_annotated_workflow.WorkflowBase |
name |
Optional[str] |
default_inputs |
Optional[Dict[str, Any]] |
fixed_inputs |
Optional[Dict[str, Any]] |
schedule |
Optional[_schedule_model.Schedule] |
notifications |
Optional[List[_common_models.Notification]] |
labels |
Optional[_common_models.Labels] |
annotations |
Optional[_common_models.Annotations] |
raw_output_data_config |
Optional[_common_models.RawOutputDataConfig] |
max_parallelism |
Optional[int] |
security_context |
Optional[security.SecurityContext] |
auth_role |
Optional[_common_models.AuthRole] |
trigger |
Optional[LaunchPlanTriggerBase] |
overwrite_cache |
Optional[bool] |
auto_activate |
bool |
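The sketch below illustrates the behaviour described above. The workflow `my_wf`, the fixed input value, and the cron schedule are illustrative assumptions, not taken from this reference.
.. code-block:: python

    from flytekit import CronSchedule, LaunchPlan, task, workflow

    @task
    def double(a: int) -> int:
        return a * 2

    @workflow
    def my_wf(a: int = 3) -> int:
        return double(a=a)

    # Default launch plan: picks up the workflow's own defaults; no other
    # arguments may be passed in this form.
    default_lp = LaunchPlan.get_or_create(workflow=my_wf)

    # Named launch plan with a fixed input and a cron schedule. Calling
    # get_or_create again with the same name returns the cached object.
    nightly_lp = LaunchPlan.get_or_create(
        workflow=my_wf,
        name="my_wf_nightly",
        fixed_inputs={"a": 10},
        schedule=CronSchedule(schedule="0 2 * * *"),
    )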
Properties
Property |
Type |
Description |
annotations |
|
|
fixed_inputs |
|
|
interface |
|
|
labels |
|
|
max_parallelism |
|
|
name |
|
|
notifications |
|
|
overwrite_cache |
|
|
parameters |
|
|
python_interface |
|
|
raw_output_data_config |
|
|
saved_inputs |
|
|
schedule |
|
|
security_context |
|
|
should_auto_activate |
|
|
trigger |
|
|
workflow |
|
|
A MapPythonTask defines a :py:class:flytekit.PythonTask which specifies how to run an inner
:py:class:flytekit.PythonFunctionTask across a range of inputs in parallel.
def MapPythonTask(
python_function_task: typing.Union[flytekit.core.python_function_task.PythonFunctionTask, flytekit.core.python_function_task.PythonInstanceTask, functools.partial],
concurrency: typing.Optional[int],
min_success_ratio: typing.Optional[float],
bound_inputs: typing.Optional[typing.Set[str]],
kwargs,
):
Wrapper that creates a MapPythonTask
Parameter |
Type |
python_function_task |
typing.Union[flytekit.core.python_function_task.PythonFunctionTask, flytekit.core.python_function_task.PythonInstanceTask, functools.partial] |
concurrency |
typing.Optional[int] |
min_success_ratio |
typing.Optional[float] |
bound_inputs |
typing.Optional[typing.Set[str]] |
kwargs |
**kwargs |
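MapPythonTask instances are typically created indirectly through flytekit.map_task rather than by calling this wrapper directly. A minimal sketch (task and workflow names are illustrative) follows.
.. code-block:: python

    from typing import List

    from flytekit import map_task, task, workflow

    @task
    def square(x: int) -> int:
        return x * x

    @workflow
    def squares_wf(xs: List[int]) -> List[int]:
        # Runs `square` over each element in parallel; concurrency and
        # min_success_ratio correspond to the parameters documented above.
        return map_task(square, concurrency=4, min_success_ratio=0.9)(x=xs)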
Methods
Method |
Description |
compile() |
Generates a node that encapsulates this task in a workflow definition |
construct_node_metadata() |
Used when constructing the node that encapsulates this task as part of a broader workflow definition |
dispatch_execute() |
This method translates Flyte’s Type system based input values and invokes the actual call to the executor |
execute() |
This method will be invoked to execute the task |
find_lhs() |
None |
get_command() |
TODO ADD bound variables to the resolver |
get_config() |
Returns the task config as a serializable dictionary |
get_container() |
Returns the container definition (if any) that is used to run the task on hosted Flyte |
get_custom() |
Return additional plugin-specific custom data (if any) as a serializable dictionary |
get_extended_resources() |
Returns the extended resources to allocate to the task on hosted Flyte |
get_input_types() |
Returns the names and python types as a dictionary for the inputs of this task |
get_k8s_pod() |
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte |
get_sql() |
Returns the Sql definition (if any) that is used to run the task on hosted Flyte |
get_type_for_input_var() |
Returns the python type for an input variable by name |
get_type_for_output_var() |
We override this method from flytekit |
local_execute() |
This function is used only in the local execution path and is responsible for calling dispatch execute |
local_execution_mode() |
None |
post_execute() |
Post execute is called after the execution has completed, with the user_params and can be used to clean-up, |
pre_execute() |
This is the method that will be invoked directly before executing the task method and before all the inputs |
prepare_target() |
Alters the underlying run_task command to modify it for map task execution and then resets it after |
sandbox_execute() |
Call dispatch_execute, in the context of a local sandbox execution |
set_command_prefix() |
None |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Generates a node that encapsulates this task in a workflow definition.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
Used when constructing the node that encapsulates this task as part of a broader workflow definition.
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte’s Type system based input values and invokes the actual call to the executor.
This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
execute()
This method will be invoked to execute the task.
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_command()
def get_command(
settings: flytekit.configuration.SerializationSettings,
):
TODO ADD bound variables to the resolver. Maybe we need a different resolver?
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_config()
def get_config(
settings: flytekit.configuration.SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom
data defined for this task.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_container()
def get_container(
settings: flytekit.configuration.SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: flytekit.configuration.SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_input_types()
def get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: flytekit.configuration.SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
We override this method from flytekit.core.base_task Task because the dispatch_execute method uses this
interface to construct outputs. Each instance of a container_array task will, however, produce outputs
according to the underlying run_task interface, and the array plugin handler will create a collection
from these individual outputs as the final output value.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
This function is used only in the local execution path and is responsible for calling dispatch execute.
Use this function when calling a task with native values (or Promises containing Flyte literals derived from
Python native values).
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up
or alter the outputs to match the intended task’s outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are already correctly
set up before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
prepare_target()
Alters the underlying run_task command to modify it for map task execution and then resets it after.
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
set_command_prefix()
def set_command_prefix(
cmd: typing.Optional[typing.List[str]],
):
Parameter |
Type |
cmd |
typing.Optional[typing.List[str]] |
Properties
Property |
Type |
Description |
bound_inputs |
|
|
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
name |
|
|
python_interface |
|
|
run_task |
|
|
security_context |
|
|
task_config |
|
|
task_type |
|
|
task_type_version |
|
|
This class holds all the things necessary to make an SdkNode, but we won’t make one until we know things like
the ID, which comes from the registration step.
def Node(
id: str,
metadata: _workflow_model.NodeMetadata,
bindings: List[_literal_models.Binding],
upstream_nodes: List[Node],
flyte_entity: Any,
):
Parameter |
Type |
id |
str |
metadata |
_workflow_model.NodeMetadata |
bindings |
List[_literal_models.Binding] |
upstream_nodes |
List[Node] |
flyte_entity |
Any |
Methods
runs_before()
def runs_before(
other: Node,
):
This is typically something we shouldn’t do. This modifies an attribute of the other instance rather than
self. But it’s done only because we wanted this English function to be the same as the shift function.
That is, calling node_1.runs_before(node_2) and node_1 >> node_2 are the same. The shift operator going the
other direction is not implemented, to further avoid confusion. Right shift was picked rather than left shift
because that’s what most users are familiar with.
Parameter |
Type |
other |
Node |
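A short sketch of explicit ordering between two otherwise independent nodes; create_node usage and the task bodies are illustrative assumptions.
.. code-block:: python

    from flytekit import task, workflow
    from flytekit.core.node_creation import create_node

    @task
    def prepare() -> None:
        print("prepare")

    @task
    def train() -> None:
        print("train")

    @workflow
    def ordered_wf():
        n1 = create_node(prepare)
        n2 = create_node(train)
        # Declares that n1 must finish before n2 starts.
        n1.runs_before(n2)   # equivalent to: n1 >> n2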
with_overrides()
def with_overrides(
node_name: Optional[str],
aliases: Optional[Dict[str, str]],
requests: Optional[Resources],
limits: Optional[Resources],
timeout: Optional[Union[int, datetime.timedelta, object]],
retries: Optional[int],
interruptible: Optional[bool],
name: Optional[str],
task_config: Optional[Any],
container_image: Optional[str],
accelerator: Optional[BaseAccelerator],
cache: Optional[bool],
cache_version: Optional[str],
cache_serialize: Optional[bool],
shared_memory: Optional[Union[L[True], str]],
pod_template: Optional[PodTemplate],
resources: Optional[Resources],
args,
kwargs,
):
Parameter |
Type |
node_name |
Optional[str] |
aliases |
Optional[Dict[str, str]] |
requests |
Optional[Resources] |
limits |
Optional[Resources] |
timeout |
Optional[Union[int, datetime.timedelta, object]] |
retries |
Optional[int] |
interruptible |
Optional[bool] |
name |
Optional[str] |
task_config |
Optional[Any] |
container_image |
Optional[str] |
accelerator |
Optional[BaseAccelerator] |
cache |
Optional[bool] |
cache_version |
Optional[str] |
cache_serialize |
Optional[bool] |
shared_memory |
Optional[Union[L[True], str]] |
pod_template |
Optional[PodTemplate] |
resources |
Optional[Resources] |
args |
*args |
kwargs |
**kwargs |
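These overrides are applied at call time inside a workflow; a hedged sketch with illustrative resource values follows.
.. code-block:: python

    from flytekit import Resources, task, workflow

    @task
    def heavy(a: int) -> int:
        return a + 1

    @workflow
    def wf(a: int) -> int:
        # Override resources and retries for this node only; other calls to
        # `heavy` keep the task-level defaults.
        return heavy(a=a).with_overrides(
            requests=Resources(cpu="2", mem="4Gi"),
            limits=Resources(cpu="4", mem="8Gi"),
            retries=3,
        )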
Properties
Property |
Type |
Description |
bindings |
|
|
flyte_entity |
|
|
id |
|
|
metadata |
|
|
name |
|
|
outputs |
|
|
run_entity |
|
|
upstream_nodes |
|
|
These are options that can be configured for a launch plan during registration or overridden during an execution.
For instance, two people may want to run the same workflow but have the offloaded data stored in two different
buckets, or you may want labels or annotations to differ. This object is used when launching an execution
in a Flyte backend, and also when registering launch plans. A usage sketch follows the method listing below.
def Options(
labels: typing.Optional[flytekit.models.common.Labels],
annotations: typing.Optional[flytekit.models.common.Annotations],
raw_output_data_config: typing.Optional[flytekit.models.common.RawOutputDataConfig],
security_context: typing.Optional[flytekit.models.security.SecurityContext],
max_parallelism: typing.Optional[int],
notifications: typing.Optional[typing.List[flytekit.models.common.Notification]],
disable_notifications: typing.Optional[bool],
overwrite_cache: typing.Optional[bool],
):
Parameter |
Type |
labels |
typing.Optional[flytekit.models.common.Labels] |
annotations |
typing.Optional[flytekit.models.common.Annotations] |
raw_output_data_config |
typing.Optional[flytekit.models.common.RawOutputDataConfig] |
security_context |
typing.Optional[flytekit.models.security.SecurityContext] |
max_parallelism |
typing.Optional[int] |
notifications |
typing.Optional[typing.List[flytekit.models.common.Notification]] |
disable_notifications |
typing.Optional[bool] |
overwrite_cache |
typing.Optional[bool] |
Methods
default_from()
def default_from(
k8s_service_account: typing.Optional[str],
raw_data_prefix: typing.Optional[str],
):
Parameter |
Type |
k8s_service_account |
typing.Optional[str] |
raw_data_prefix |
typing.Optional[str] |
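A minimal sketch of building an Options object. The label, annotation, and bucket values are placeholders; the resulting object would typically be passed to FlyteRemote.execute or used when registering a launch plan.
.. code-block:: python

    from flytekit.core.options import Options
    from flytekit.models.common import Annotations, Labels, RawOutputDataConfig

    opts = Options(
        labels=Labels({"team": "ml-platform"}),
        annotations=Annotations({"owner": "data-eng"}),
        raw_output_data_config=RawOutputDataConfig("s3://my-team-bucket/offloaded"),
        max_parallelism=10,
        overwrite_cache=True,
    )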
Dictionary that remembers insertion order
Custom PodTemplate specification for a Task.
def PodTemplate(
pod_spec: typing.Optional[ForwardRef('V1PodSpec')],
primary_container_name: str,
labels: typing.Optional[typing.Dict[str, str]],
annotations: typing.Optional[typing.Dict[str, str]],
):
Parameter |
Type |
pod_spec |
typing.Optional[ForwardRef('V1PodSpec')] |
primary_container_name |
str |
labels |
typing.Optional[typing.Dict[str, str]] |
annotations |
typing.Optional[typing.Dict[str, str]] |
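A hedged sketch of a PodTemplate attached to a task via the @task decorator; the kubernetes spec values are illustrative and the example assumes the kubernetes Python client is installed.
.. code-block:: python

    from flytekit import PodTemplate, task
    from kubernetes.client import V1Container, V1PodSpec, V1Toleration

    pod_template = PodTemplate(
        primary_container_name="primary",
        labels={"app": "my-flyte-task"},
        annotations={"sidecar.istio.io/inject": "false"},
        pod_spec=V1PodSpec(
            containers=[V1Container(name="primary")],
            tolerations=[
                V1Toleration(key="gpu", operator="Exists", effect="NoSchedule")
            ],
        ),
    )

    @task(pod_template=pod_template)
    def train() -> None:
        ...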
A Python AutoContainer task should be used as the base for all extensions that want the user’s code to be in the
container and the container information to be automatically captured.
This base will auto-configure the image and image version to be used for all its derivatives.
If you are looking to extend, you might prefer to use PythonFunctionTask or PythonInstanceTask instead.
def PythonAutoContainerTask(
name: str,
task_config: T,
task_type,
container_image: Optional[Union[str, ImageSpec]],
requests: Optional[Resources],
limits: Optional[Resources],
environment: Optional[Dict[str, str]],
task_resolver: Optional[TaskResolverMixin],
secret_requests: Optional[List[Secret]],
pod_template: Optional[PodTemplate],
pod_template_name: Optional[str],
accelerator: Optional[BaseAccelerator],
shared_memory: Optional[Union[L[True], str]],
resources: Optional[Resources],
kwargs,
):
Parameter |
Type |
name |
str |
task_config |
T |
task_type |
|
container_image |
Optional[Union[str, ImageSpec]] |
requests |
Optional[Resources] |
limits |
Optional[Resources] |
environment |
Optional[Dict[str, str]] |
task_resolver |
Optional[TaskResolverMixin] |
secret_requests |
Optional[List[Secret]] |
pod_template |
Optional[PodTemplate] |
pod_template_name |
Optional[str] |
accelerator |
Optional[BaseAccelerator] |
shared_memory |
Optional[Union[L[True], str]] |
resources |
Optional[Resources] |
kwargs |
**kwargs |
Methods
Method |
Description |
compile() |
Generates a node that encapsulates this task in a workflow definition |
construct_node_metadata() |
Used when constructing the node that encapsulates this task as part of a broader workflow definition |
dispatch_execute() |
This method translates Flyte’s Type system based input values and invokes the actual call to the executor |
execute() |
This method will be invoked to execute the task |
find_lhs() |
None |
get_command() |
Returns the command which should be used in the container definition for the serialized version of this task |
get_config() |
Returns the task config as a serializable dictionary |
get_container() |
Returns the container definition (if any) that is used to run the task on hosted Flyte |
get_custom() |
Return additional plugin-specific custom data (if any) as a serializable dictionary |
get_default_command() |
Returns the default pyflyte-execute command used to run this on hosted Flyte platforms |
get_extended_resources() |
Returns the extended resources to allocate to the task on hosted Flyte |
get_image() |
Update image spec based on fast registration usage, and return string representing the image |
get_input_types() |
Returns the names and python types as a dictionary for the inputs of this task |
get_k8s_pod() |
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte |
get_sql() |
Returns the Sql definition (if any) that is used to run the task on hosted Flyte |
get_type_for_input_var() |
Returns the python type for an input variable by name |
get_type_for_output_var() |
Returns the python type for the specified output variable by name |
local_execute() |
This function is used only in the local execution path and is responsible for calling dispatch execute |
local_execution_mode() |
None |
post_execute() |
Post execute is called after the execution has completed, with the user_params and can be used to clean-up, |
pre_execute() |
This is the method that will be invoked directly before executing the task method and before all the inputs |
reset_command_fn() |
Resets the command which should be used in the container definition of this task to the default arguments |
sandbox_execute() |
Call dispatch_execute, in the context of a local sandbox execution |
set_command_fn() |
By default, the task will run on the Flyte platform using the pyflyte-execute command |
set_resolver() |
By default, flytekit uses the DefaultTaskResolver to resolve the task |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Generates a node that encapsulates this task in a workflow definition.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
Used when constructing the node that encapsulates this task as part of a broader workflow definition.
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte’s Type system based input values and invokes the actual call to the executor.
This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
execute()
This method will be invoked to execute the task.
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_command()
def get_command(
settings: SerializationSettings,
):
Returns the command which should be used in the container definition for the serialized version of this task
registered on a hosted Flyte platform.
Parameter |
Type |
settings |
SerializationSettings |
get_config()
def get_config(
settings: SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom
data defined for this task.
Parameter |
Type |
settings |
SerializationSettings |
get_container()
def get_container(
settings: SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_default_command()
def get_default_command(
settings: SerializationSettings,
):
Returns the default pyflyte-execute command used to run this on hosted Flyte platforms.
Parameter |
Type |
settings |
SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_image()
def get_image(
settings: SerializationSettings,
):
Update image spec based on fast registration usage, and return string representing the image
Parameter |
Type |
settings |
SerializationSettings |
get_input_types()
def get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
Returns the python type for the specified output variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
This function is used only in the local execution path and is responsible for calling dispatch execute.
Use this function when calling a task with native values (or Promises containing Flyte literals derived from
Python native values).
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up
or alter the outputs to match the intended task’s outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are already correctly
set up before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
reset_command_fn()
Resets the command which should be used in the container definition of this task to the default arguments.
This is useful when the command line is overridden at serialization time.
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
set_command_fn()
def set_command_fn(
get_command_fn: Optional[Callable[[SerializationSettings], List[str]]],
):
By default, the task will run on the Flyte platform using the pyflyte-execute command.
However, it can be useful to update the command with which the task is serialized for specific cases like
running map tasks (“pyflyte-map-execute”) or for fast-executed tasks.
Parameter |
Type |
get_command_fn |
Optional[Callable[[SerializationSettings], List[str]]] |
set_resolver()
def set_resolver(
resolver: TaskResolverMixin,
):
By default, flytekit uses the DefaultTaskResolver to resolve the task. This method allows the user to set a custom
task resolver. It can be useful to override the task resolver for specific cases like running tasks in the jupyter notebook.
Parameter |
Type |
resolver |
TaskResolverMixin |
Properties
Property |
Type |
Description |
container_image |
|
|
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
name |
|
|
python_interface |
|
|
resources |
|
|
security_context |
|
|
task_config |
|
|
task_resolver |
|
|
task_type |
|
|
task_type_version |
|
|
A Python Function task should be used as the base for all extensions that have a Python function. It will
automatically detect the interface of the Python function and, when serialized for the hosted Flyte platform,
handles writing the execution command used to run the function.
It is advised that this task be used via the @task decorator, as follows:
.. code-block:: python

    @task
    def my_func(a: int) -> str:
        ...

In the above code, the name of the function, the module, and the interface (inputs = int and outputs = str) will be
auto-detected. A fuller usage sketch follows the parameter table below.
def PythonFunctionTask(
task_config: T,
task_function: Callable,
task_type,
ignore_input_vars: Optional[List[str]],
execution_mode: ExecutionBehavior,
task_resolver: Optional[TaskResolverMixin],
node_dependency_hints: Optional[Iterable[Union['PythonFunctionTask', '_annotated_launch_plan.LaunchPlan', WorkflowBase]]],
pickle_untyped: bool,
kwargs,
):
Parameter |
Type |
task_config |
T |
task_function |
Callable |
task_type |
|
ignore_input_vars |
Optional[List[str]] |
execution_mode |
ExecutionBehavior |
task_resolver |
Optional[TaskResolverMixin] |
node_dependency_hints |
Optional[Iterable[Union['PythonFunctionTask', '_annotated_launch_plan.LaunchPlan', WorkflowBase]]] |
pickle_untyped |
bool |
kwargs |
**kwargs |
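A slightly fuller sketch of the decorator usage described above; the cache and resource settings are illustrative.
.. code-block:: python

    from flytekit import Resources, task

    @task(
        cache=True,
        cache_version="1.0",
        requests=Resources(cpu="1", mem="500Mi"),
    )
    def normalize(a: int, scale: float = 2.0) -> str:
        # The (int, float) inputs and the str output are detected from this signature.
        return str(a * scale)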
Methods
Method |
Description |
compile() |
Generates a node that encapsulates this task in a workflow definition |
compile_into_workflow() |
In the case of dynamic workflows, this function will produce a workflow definition at execution time which will |
construct_node_metadata() |
Used when constructing the node that encapsulates this task as part of a broader workflow definition |
dispatch_execute() |
This method translates Flyte’s Type system based input values and invokes the actual call to the executor |
dynamic_execute() |
By the time this function is invoked, the local_execute function should have unwrapped the Promises and Flyte |
execute() |
This method will be invoked to execute the task |
find_lhs() |
None |
get_command() |
Returns the command which should be used in the container definition for the serialized version of this task |
get_config() |
Returns the task config as a serializable dictionary |
get_container() |
Returns the container definition (if any) that is used to run the task on hosted Flyte |
get_custom() |
Return additional plugin-specific custom data (if any) as a serializable dictionary |
get_default_command() |
Returns the default pyflyte-execute command used to run this on hosted Flyte platforms |
get_extended_resources() |
Returns the extended resources to allocate to the task on hosted Flyte |
get_image() |
Update image spec based on fast registration usage, and return string representing the image |
get_input_types() |
Returns the names and python types as a dictionary for the inputs of this task |
get_k8s_pod() |
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte |
get_sql() |
Returns the Sql definition (if any) that is used to run the task on hosted Flyte |
get_type_for_input_var() |
Returns the python type for an input variable by name |
get_type_for_output_var() |
Returns the python type for the specified output variable by name |
local_execute() |
This function is used only in the local execution path and is responsible for calling dispatch execute |
local_execution_mode() |
None |
post_execute() |
Post execute is called after the execution has completed, with the user_params and can be used to clean-up, |
pre_execute() |
This is the method that will be invoked directly before executing the task method and before all the inputs |
reset_command_fn() |
Resets the command which should be used in the container definition of this task to the default arguments |
sandbox_execute() |
Call dispatch_execute, in the context of a local sandbox execution |
set_command_fn() |
By default, the task will run on the Flyte platform using the pyflyte-execute command |
set_resolver() |
By default, flytekit uses the DefaultTaskResolver to resolve the task |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Generates a node that encapsulates this task in a workflow definition.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
compile_into_workflow()
def compile_into_workflow(
ctx: FlyteContext,
task_function: Callable,
kwargs,
):
In the case of dynamic workflows, this function will produce a workflow definition at execution time which will
then proceed to be executed.
Parameter |
Type |
ctx |
FlyteContext |
task_function |
Callable |
kwargs |
**kwargs |
def construct_node_metadata()
Used when constructing the node that encapsulates this task as part of a broader workflow definition.
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte’s Type system based input values and invokes the actual call to the executor.
This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
dynamic_execute()
def dynamic_execute(
task_function: Callable,
kwargs,
):
By the time this function is invoked, the local_execute function should have unwrapped the Promises and Flyte
literal wrappers so that the kwargs we are working with here are now Python native literal values. This
function is also expected to return Python native literal values.
Since the user code within a dynamic task constitutes a workflow, we have to first compile the workflow and
then execute it.
When running for real in production, the task would stop after the compilation step, and then create a file
representing that newly generated workflow, instead of executing it.
Parameter |
Type |
task_function |
Callable |
kwargs |
**kwargs |
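The sketch below shows the kind of user code that ends up going through dynamic_execute: a @dynamic task whose body is compiled into a workflow at execution time. Task and workflow names are illustrative.
.. code-block:: python

    from typing import List

    from flytekit import dynamic, task, workflow

    @task
    def double(x: int) -> int:
        return x * 2

    @dynamic
    def double_all(xs: List[int]) -> List[int]:
        # The fan-out is only known at run time, so this body is compiled into
        # a dynamically generated workflow and then executed.
        return [double(x=x) for x in xs]

    @workflow
    def wf(xs: List[int]) -> List[int]:
        return double_all(xs=xs)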
execute()
This method will be invoked to execute the task. If you do decide to override this method you must also
handle dynamic tasks or you will no longer be able to use the task as a dynamic task generator.
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_command()
def get_command(
settings: SerializationSettings,
):
Returns the command which should be used in the container definition for the serialized version of this task
registered on a hosted Flyte platform.
Parameter |
Type |
settings |
SerializationSettings |
get_config()
def get_config(
settings: SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom
data defined for this task.
Parameter |
Type |
settings |
SerializationSettings |
get_container()
def get_container(
settings: SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_default_command()
def get_default_command(
settings: SerializationSettings,
):
Returns the default pyflyte-execute command used to run this on hosted Flyte platforms.
Parameter |
Type |
settings |
SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_image()
def get_image(
settings: SerializationSettings,
):
Update image spec based on fast registration usage, and return string representing the image
Parameter |
Type |
settings |
SerializationSettings |
get_input_types()
def get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
Returns the python type for the specified output variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
This function is used only in the local execution path and is responsible for calling dispatch execute.
Use this function when calling a task with native values (or Promises containing Flyte literals derived from
Python native values).
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up
or alter the outputs to match the intended task’s outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are already correctly
set up before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
reset_command_fn()
Resets the command which should be used in the container definition of this task to the default arguments.
This is useful when the command line is overridden at serialization time.
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
set_command_fn()
def set_command_fn(
get_command_fn: Optional[Callable[[SerializationSettings], List[str]]],
):
By default, the task will run on the Flyte platform using the pyflyte-execute command.
However, it can be useful to update the command with which the task is serialized for specific cases like
running map tasks (“pyflyte-map-execute”) or for fast-executed tasks.
Parameter |
Type |
get_command_fn |
Optional[Callable[[SerializationSettings], List[str]]] |
set_resolver()
def set_resolver(
resolver: TaskResolverMixin,
):
By default, flytekit uses the DefaultTaskResolver to resolve the task. This method allows the user to set a custom
task resolver. It can be useful to override the task resolver for specific cases like running tasks in the jupyter notebook.
Parameter |
Type |
resolver |
TaskResolverMixin |
Properties
Property |
Type |
Description |
container_image |
|
|
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
execution_mode |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
name |
|
|
node_dependency_hints |
|
|
python_interface |
|
|
resources |
|
|
security_context |
|
|
task_config |
|
|
task_function |
|
|
task_resolver |
|
|
task_type |
|
|
task_type_version |
|
|
Base class for all tasks with a Python native Interface. This should be used directly for task types that do
not have a Python function to be executed. Otherwise, refer to :py:class:flytekit.PythonFunctionTask.
A subclassing sketch follows the parameter table below.
def PythonTask(
task_type: str,
name: str,
task_config: typing.Optional[~T],
interface: typing.Optional[flytekit.core.interface.Interface],
environment: typing.Optional[typing.Dict[str, str]],
disable_deck: typing.Optional[bool],
enable_deck: typing.Optional[bool],
deck_fields: typing.Optional[typing.Tuple[flytekit.deck.deck.DeckField, ...]],
kwargs,
):
Parameter |
Type |
task_type |
str |
name |
str |
task_config |
typing.Optional[~T] |
interface |
typing.Optional[flytekit.core.interface.Interface] |
environment |
typing.Optional[typing.Dict[str, str]] |
disable_deck |
typing.Optional[bool] |
enable_deck |
typing.Optional[bool] |
deck_fields |
typing.Optional[typing.Tuple[flytekit.deck.deck.DeckField, ...]] |
kwargs |
**kwargs |
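A rough sketch, under the assumptions noted in the comments, of extending PythonTask directly for a task type that has no user-supplied Python function body; the task type string, interface, and executor logic are hypothetical.
.. code-block:: python

    from typing import Any

    from flytekit.core.base_task import PythonTask
    from flytekit.core.interface import Interface

    class EchoTask(PythonTask):
        """Hypothetical task type whose behaviour is fixed by the type itself."""

        def __init__(self, name: str, **kwargs):
            super().__init__(
                task_type="echo",  # hypothetical task type identifier
                name=name,
                interface=Interface(inputs={"msg": str}, outputs={"o0": str}),
                **kwargs,
            )

        def execute(self, **kwargs) -> Any:
            # There is no wrapped Python function; the task type defines the work.
            return kwargs["msg"]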
Methods
Method |
Description |
compile() |
Generates a node that encapsulates this task in a workflow definition |
construct_node_metadata() |
Used when constructing the node that encapsulates this task as part of a broader workflow definition |
dispatch_execute() |
This method translates Flyte’s Type system based input values and invokes the actual call to the executor |
execute() |
This method will be invoked to execute the task |
find_lhs() |
None |
get_config() |
Returns the task config as a serializable dictionary |
get_container() |
Returns the container definition (if any) that is used to run the task on hosted Flyte |
get_custom() |
Return additional plugin-specific custom data (if any) as a serializable dictionary |
get_extended_resources() |
Returns the extended resources to allocate to the task on hosted Flyte |
get_input_types() |
Returns the names and python types as a dictionary for the inputs of this task |
get_k8s_pod() |
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte |
get_sql() |
Returns the Sql definition (if any) that is used to run the task on hosted Flyte |
get_type_for_input_var() |
Returns the python type for an input variable by name |
get_type_for_output_var() |
Returns the python type for the specified output variable by name |
local_execute() |
This function is used only in the local execution path and is responsible for calling dispatch execute |
local_execution_mode() |
None |
post_execute() |
Post execute is called after the execution has completed, with the user_params and can be used to clean-up, |
pre_execute() |
This is the method that will be invoked directly before executing the task method and before all the inputs |
sandbox_execute() |
Call dispatch_execute, in the context of a local sandbox execution |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Generates a node that encapsulates this task in a workflow definition.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
Used when constructing the node that encapsulates this task as part of a broader workflow definition.
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte’s Type system based input values and invokes the actual call to the executor.
This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
execute()
This method will be invoked to execute the task.
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_config()
def get_config(
settings: flytekit.configuration.SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom
data defined for this task.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_container()
def get_container(
settings: flytekit.configuration.SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: flytekit.configuration.SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_input_types()
def get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: flytekit.configuration.SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
Returns the python type for the specified output variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
This function is used only in the local execution path and is responsible for calling dispatch execute.
Use this function when calling a task with native values (or Promises containing Flyte literals derived from
Python native values).
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up
or alter the outputs to match the intended task’s outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are already correctly
set up before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
Properties
Property |
Type |
Description |
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
name |
|
|
python_interface |
|
|
security_context |
|
|
task_config |
|
|
task_type |
|
|
task_type_version |
|
|
def ReferenceEntity(
reference: typing.Union[flytekit.core.reference_entity.WorkflowReference, flytekit.core.reference_entity.TaskReference, flytekit.core.reference_entity.LaunchPlanReference],
inputs: typing.Dict[str, typing.Type],
outputs: typing.Dict[str, typing.Type],
):
Parameter |
Type |
reference |
typing.Union[flytekit.core.reference_entity.WorkflowReference, flytekit.core.reference_entity.TaskReference, flytekit.core.reference_entity.LaunchPlanReference] |
inputs |
typing.Dict[str, typing.Type] |
outputs |
typing.Dict[str, typing.Type] |
Methods
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
execute()
Parameter |
Type |
kwargs |
**kwargs |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
Please see the local_execute comments in the main task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
unwrap_literal_map_and_execute()
def unwrap_literal_map_and_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Please see the implementation of the dispatch_execute function in the real task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
Properties
Property |
Type |
Description |
id |
|
|
interface |
|
|
name |
|
|
python_interface |
|
|
reference |
|
|
A reference launch plan serves as a pointer to a Launch Plan that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If at registration time the interface provided causes an issue with compilation, an error will be returned.
def ReferenceLaunchPlan(
project: str,
domain: str,
name: str,
version: str,
inputs: Dict[str, Type],
outputs: Dict[str, Type],
):
Parameter |
Type |
project |
str |
domain |
str |
name |
str |
version |
str |
inputs |
Dict[str, Type] |
outputs |
Dict[str, Type] |
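An illustrative sketch of pointing at an already-registered launch plan. The project, domain, name, version, and interface shown are placeholders and must match what Flyte Admin has stored.
.. code-block:: python

    from flytekit import workflow
    from flytekit.core.launch_plan import ReferenceLaunchPlan

    ref_lp = ReferenceLaunchPlan(
        project="flytesnacks",
        domain="development",
        name="my_project.workflows.my_wf_lp",
        version="v1",
        inputs={"a": int},
        outputs={"o0": str},
    )

    @workflow
    def caller_wf(a: int) -> str:
        # Called like any other entity; resolution happens at registration time.
        return ref_lp(a=a)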
Methods
clone_with()
def clone_with(
name: str,
parameters: Optional[_interface_models.ParameterMap],
fixed_inputs: Optional[_literal_models.LiteralMap],
schedule: Optional[_schedule_model.Schedule],
notifications: Optional[List[_common_models.Notification]],
labels: Optional[_common_models.Labels],
annotations: Optional[_common_models.Annotations],
raw_output_data_config: Optional[_common_models.RawOutputDataConfig],
max_parallelism: Optional[int],
security_context: Optional[security.SecurityContext],
trigger: Optional[LaunchPlanTriggerBase],
overwrite_cache: Optional[bool],
auto_activate: bool,
):
Parameter |
Type |
name |
str |
parameters |
Optional[_interface_models.ParameterMap] |
fixed_inputs |
Optional[_literal_models.LiteralMap] |
schedule |
Optional[_schedule_model.Schedule] |
notifications |
Optional[List[_common_models.Notification]] |
labels |
Optional[_common_models.Labels] |
annotations |
Optional[_common_models.Annotations] |
raw_output_data_config |
Optional[_common_models.RawOutputDataConfig] |
max_parallelism |
Optional[int] |
security_context |
Optional[security.SecurityContext] |
trigger |
Optional[LaunchPlanTriggerBase] |
overwrite_cache |
Optional[bool] |
auto_activate |
bool |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
create()
def create(
name: str,
workflow: _annotated_workflow.WorkflowBase,
default_inputs: Optional[Dict[str, Any]],
fixed_inputs: Optional[Dict[str, Any]],
schedule: Optional[_schedule_model.Schedule],
notifications: Optional[List[_common_models.Notification]],
labels: Optional[_common_models.Labels],
annotations: Optional[_common_models.Annotations],
raw_output_data_config: Optional[_common_models.RawOutputDataConfig],
max_parallelism: Optional[int],
security_context: Optional[security.SecurityContext],
auth_role: Optional[_common_models.AuthRole],
trigger: Optional[LaunchPlanTriggerBase],
overwrite_cache: Optional[bool],
auto_activate: bool,
):
Parameter |
Type |
name |
str |
workflow |
_annotated_workflow.WorkflowBase |
default_inputs |
Optional[Dict[str, Any]] |
fixed_inputs |
Optional[Dict[str, Any]] |
schedule |
Optional[_schedule_model.Schedule] |
notifications |
Optional[List[_common_models.Notification]] |
labels |
Optional[_common_models.Labels] |
annotations |
Optional[_common_models.Annotations] |
raw_output_data_config |
Optional[_common_models.RawOutputDataConfig] |
max_parallelism |
Optional[int] |
security_context |
Optional[security.SecurityContext] |
auth_role |
Optional[_common_models.AuthRole] |
trigger |
Optional[LaunchPlanTriggerBase] |
overwrite_cache |
Optional[bool] |
auto_activate |
bool |
execute()
Parameter |
Type |
kwargs |
**kwargs |
get_default_launch_plan()
def get_default_launch_plan(
ctx: FlyteContext,
workflow: _annotated_workflow.WorkflowBase,
):
Users should probably call the get_or_create function defined below instead. A default launch plan is the one
that will just pick up whatever default values are defined in the workflow function signature (if any) and
use the default auth information supplied during serialization, with no notifications or schedules.
Parameter |
Type |
ctx |
FlyteContext |
workflow |
_annotated_workflow.WorkflowBase |
get_or_create()
def get_or_create(
workflow: _annotated_workflow.WorkflowBase,
name: Optional[str],
default_inputs: Optional[Dict[str, Any]],
fixed_inputs: Optional[Dict[str, Any]],
schedule: Optional[_schedule_model.Schedule],
notifications: Optional[List[_common_models.Notification]],
labels: Optional[_common_models.Labels],
annotations: Optional[_common_models.Annotations],
raw_output_data_config: Optional[_common_models.RawOutputDataConfig],
max_parallelism: Optional[int],
security_context: Optional[security.SecurityContext],
auth_role: Optional[_common_models.AuthRole],
trigger: Optional[LaunchPlanTriggerBase],
overwrite_cache: Optional[bool],
auto_activate: bool,
):
This function offers a friendlier interface for creating launch plans. If the name for the launch plan is not
supplied, this assumes you are looking for the default launch plan for the workflow. If a name is specified, it
will be used. If creating the default launch plan, none of the other arguments may be specified.
The resulting launch plan is also cached; if this is called again with the same name, the cached version is
returned.
Parameter |
Type |
workflow |
_annotated_workflow.WorkflowBase |
name |
Optional[str] |
default_inputs |
Optional[Dict[str, Any]] |
fixed_inputs |
Optional[Dict[str, Any]] |
schedule |
Optional[_schedule_model.Schedule] |
notifications |
Optional[List[_common_models.Notification]] |
labels |
Optional[_common_models.Labels] |
annotations |
Optional[_common_models.Annotations] |
raw_output_data_config |
Optional[_common_models.RawOutputDataConfig] |
max_parallelism |
Optional[int] |
security_context |
Optional[security.SecurityContext] |
auth_role |
Optional[_common_models.AuthRole] |
trigger |
Optional[LaunchPlanTriggerBase] |
overwrite_cache |
Optional[bool] |
auto_activate |
bool |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
Please see the local_execute comments in the main task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
unwrap_literal_map_and_execute()
def unwrap_literal_map_and_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Please see the implementation of the dispatch_execute function in the real task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
Properties
Property |
Type |
Description |
annotations |
|
|
fixed_inputs |
|
|
id |
|
|
interface |
|
|
labels |
|
|
max_parallelism |
|
|
name |
|
|
notifications |
|
|
overwrite_cache |
|
|
parameters |
|
|
python_interface |
|
|
raw_output_data_config |
|
|
reference |
|
|
saved_inputs |
|
|
schedule |
|
|
security_context |
|
|
should_auto_activate |
|
|
trigger |
|
|
workflow |
|
|
def ReferenceSpec(
template: flytekit.core.reference_entity.ReferenceTemplate,
):
Parameter |
Type |
template |
flytekit.core.reference_entity.ReferenceTemplate |
Properties
Property |
Type |
Description |
template |
|
|
This is a reference task; the body of the function passed in through the constructor will never be used, only the
signature of the function will be. The signature should also match the signature of the task you’re referencing,
as stored by Flyte Admin; if it does not, workflows using this task will break upon compilation. A usage sketch
follows the parameter table below.
def ReferenceTask(
project: str,
domain: str,
name: str,
version: str,
inputs: Dict[str, type],
outputs: Dict[str, Type],
):
Parameter |
Type |
project |
str |
domain |
str |
name |
str |
version |
str |
inputs |
Dict[str, type] |
outputs |
Dict[str, Type] |
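A hedged sketch of constructing one directly (the project, domain, name, version, and interface below are hypothetical and must match the task registered in Flyte Admin):
from flytekit.core.task import ReferenceTask

# Hypothetical identifiers; inputs/outputs must mirror the registered task's interface.
ref_rotate = ReferenceTask(
    project="flytesnacks",
    domain="development",
    name="my_module.rotate_image",
    version="v1",
    inputs={"image_path": str, "angle": int},
    outputs={"o0": str},
)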
Methods
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
dispatch_execute()
def dispatch_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
This method translates Flyte’s type-system-based input values and invokes the actual call to the executor.
This method is also invoked during runtime.
VoidPromise is returned when the task itself declares no outputs.
LiteralMap is returned when the task declares one or more outputs; individual outputs may be None.
DynamicJobSpec is returned when a dynamic workflow is executed.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
execute()
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_config()
def get_config(
settings: flytekit.configuration.SerializationSettings,
):
Returns the task config as a serializable dictionary. This task config consists of metadata about the custom
configuration defined for this task.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_container()
def get_container(
settings: flytekit.configuration.SerializationSettings,
):
Returns the container definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_custom()
def get_custom(
settings: flytekit.configuration.SerializationSettings,
):
Return additional plugin-specific custom data (if any) as a serializable dictionary.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_extended_resources()
def get_extended_resources(
settings: flytekit.configuration.SerializationSettings,
):
Returns the extended resources to allocate to the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_input_types()
def get_input_types()
Returns the names and python types as a dictionary for the inputs of this task.
get_k8s_pod()
def get_k8s_pod(
settings: flytekit.configuration.SerializationSettings,
):
Returns the kubernetes pod definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
get_sql()
def get_sql(
settings: flytekit.configuration.SerializationSettings,
):
Returns the Sql definition (if any) that is used to run the task on hosted Flyte.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
def get_type_for_input_var(
k: str,
v: typing.Any,
):
Returns the python type for an input variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
get_type_for_output_var()
def get_type_for_output_var(
k: str,
v: typing.Any,
):
Returns the python type for the specified output variable by name.
Parameter |
Type |
k |
str |
v |
typing.Any |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
Please see the local_execute comments in the main task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
post_execute()
def post_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
rval: typing.Any,
):
Post execute is called after the execution has completed, with the user_params, and can be used to clean up
or alter the outputs to match the intended task's outputs. If not overridden, this function is a no-op.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
rval |
typing.Any |
pre_execute()
def pre_execute(
user_params: typing.Optional[flytekit.core.context_manager.ExecutionParameters],
):
This is the method that will be invoked directly before executing the task method and before all the inputs
are converted. One particular case where this is useful is if the context is to be modified for the user process
to get some user-space parameters. This also ensures that things like SparkSession are correctly set up
before the type transformers are called.
This should return either the same context or the mutated context.
Parameter |
Type |
user_params |
typing.Optional[flytekit.core.context_manager.ExecutionParameters] |
sandbox_execute()
def sandbox_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Call dispatch_execute, in the context of a local sandbox execution. Not invoked during runtime.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
unwrap_literal_map_and_execute()
def unwrap_literal_map_and_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Please see the implementation of the dispatch_execute function in the real task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
Properties
Property |
Type |
Description |
deck_fields |
|
|
disable_deck |
|
|
docs |
|
|
enable_deck |
|
|
environment |
|
|
id |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
metadata |
|
|
name |
|
|
python_interface |
|
|
reference |
|
|
security_context |
|
|
task_config |
|
|
task_type |
|
|
task_type_version |
|
|
def ReferenceTemplate(
id: flytekit.models.core.identifier.Identifier,
resource_type: int,
):
A reference template encapsulates all the information necessary to use reference entities within other
workflows or dynamic tasks.
Parameter |
Type |
id |
flytekit.models.core.identifier.Identifier |
resource_type |
int |
Properties
Property |
Type |
Description |
id |
|
|
resource_type |
|
|
A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If, at registration time, the provided interface causes an issue with compilation, an error will be returned.
def ReferenceWorkflow(
project: str,
domain: str,
name: str,
version: str,
inputs: Dict[str, Type],
outputs: Dict[str, Type],
):
Parameter |
Type |
project |
str |
domain |
str |
name |
str |
version |
str |
inputs |
Dict[str, Type] |
outputs |
Dict[str, Type] |
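A hedged sketch (the identifiers and interface below are hypothetical and must match the workflow registered in Flyte Admin):
from flytekit.core.workflow import ReferenceWorkflow

ref_wf = ReferenceWorkflow(
    project="flytesnacks",
    domain="development",
    name="workflows.example.wf",
    version="v1",
    inputs={"a": int},
    outputs={"o0": int},
)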
Methods
add()
def add(
t: flytekit.core.python_auto_container.PythonAutoContainerTask,
):
Parameter |
Type |
t |
flytekit.core.python_auto_container.PythonAutoContainerTask |
compile()
def compile(
ctx: flytekit.core.context_manager.FlyteContext,
args,
kwargs,
):
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
args |
*args |
kwargs |
**kwargs |
def construct_node_metadata()
execute()
Parameter |
Type |
kwargs |
**kwargs |
find_lhs()
get_all_tasks()
Future-proof method that makes it easy to access all tasks (not required today, since tasks are auto-registered).
load_task()
def load_task(
loader_args: typing.List[str],
):
Given the set of identifier keys, should return one Python Task or raise an error if not found
Parameter |
Type |
loader_args |
typing.List[str] |
loader_args()
def loader_args(
settings: flytekit.configuration.SerializationSettings,
t: flytekit.core.python_auto_container.PythonAutoContainerTask,
):
This is responsible for turning an instance of a task into args that the load_task function can reconstitute.
Parameter |
Type |
settings |
flytekit.configuration.SerializationSettings |
t |
flytekit.core.python_auto_container.PythonAutoContainerTask |
local_execute()
def local_execute(
ctx: flytekit.core.context_manager.FlyteContext,
kwargs,
):
Please see the local_execute comments in the main task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
task_name()
def task_name(
t: PythonAutoContainerTask,
):
Overridable function that can optionally return a custom name for a given task
Parameter |
Type |
t |
PythonAutoContainerTask |
unwrap_literal_map_and_execute()
def unwrap_literal_map_and_execute(
ctx: flytekit.core.context_manager.FlyteContext,
input_literal_map: flytekit.models.literals.LiteralMap,
):
Please see the implementation of the dispatch_execute function in the real task.
Parameter |
Type |
ctx |
flytekit.core.context_manager.FlyteContext |
input_literal_map |
flytekit.models.literals.LiteralMap |
Properties
Property |
Type |
Description |
default_options |
|
|
docs |
|
|
failure_node |
|
|
function |
|
|
id |
|
|
instantiated_in |
|
|
interface |
|
|
lhs |
|
|
location |
|
|
name |
|
|
nodes |
|
|
on_failure |
|
|
output_bindings |
|
|
python_interface |
|
|
reference |
|
|
short_name |
|
|
workflow_metadata |
|
|
workflow_metadata_defaults |
|
|
These settings are provided while serializing a workflow and task, before registration. This is required to get
runtime information at serialization time, as well as some defaults.
Attributes:
project (str): The project (if any) under which to register entities.
domain (str): The domain (if any) under which to register entities.
version (str): The version (if any) with which to register entities.
image_config (ImageConfig): The image config used to define task container images.
env (Optional[Dict[str, str]]): Environment variables injected into task container definitions.
flytekit_virtualenv_root (Optional[str]): During out-of-container serialization, the absolute path of the flytekit
virtualenv at serialization time won’t match the in-container value at execution time. This optional value
is used to provide the in-container virtualenv path.
python_interpreter (Optional[str]): The python executable to use. This is used for Spark tasks in out-of-container
execution.
entrypoint_settings (Optional[EntrypointSettings]): Information about the command, path, and version of the
entrypoint program.
fast_serialization_settings (Optional[FastSerializationSettings]): If the code is being serialized so that it
can be fast-registered (and thus omit building a Docker image), this object contains additional parameters
for serialization.
source_root (Optional[str]): The root directory of the source code.
def SerializationSettings(
image_config: ImageConfig,
project: typing.Optional[str],
domain: typing.Optional[str],
version: typing.Optional[str],
env: Optional[Dict[str, str]],
git_repo: Optional[str],
python_interpreter: str,
flytekit_virtualenv_root: Optional[str],
fast_serialization_settings: Optional[FastSerializationSettings],
source_root: Optional[str],
):
Parameter |
Type |
image_config |
ImageConfig |
project |
typing.Optional[str] |
domain |
typing.Optional[str] |
version |
typing.Optional[str] |
env |
Optional[Dict[str, str]] |
git_repo |
Optional[str] |
python_interpreter |
str |
flytekit_virtualenv_root |
Optional[str] |
fast_serialization_settings |
Optional[FastSerializationSettings] |
source_root |
Optional[str] |
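A minimal construction sketch; the image, project, domain, version, and environment values are placeholders for illustration:
from flytekit.configuration import ImageConfig, SerializationSettings

settings = SerializationSettings(
    image_config=ImageConfig.from_images("ghcr.io/example/flytekit:latest"),
    project="flytesnacks",
    domain="development",
    version="v1",
    env={"FOO": "bar"},
)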
Methods
default_entrypoint_settings()
def default_entrypoint_settings(
interpreter_path: str,
):
Assumes the entrypoint is installed in the same virtual environment as the given interpreter.
Parameter |
Type |
interpreter_path |
str |
for_image()
def for_image(
image: str,
version: str,
project: str,
domain: str,
python_interpreter_path: str,
):
Parameter |
Type |
image |
str |
version |
str |
project |
str |
domain |
str |
python_interpreter_path |
str |
from_dict()
def from_dict(
kvs: typing.Union[dict, list, str, int, float, bool, NoneType],
infer_missing,
):
Parameter |
Type |
kvs |
typing.Union[dict, list, str, int, float, bool, NoneType] |
infer_missing |
|
from_json()
def from_json(
s: typing.Union[str, bytes, bytearray],
parse_float,
parse_int,
parse_constant,
infer_missing,
kw,
):
Parameter |
Type |
s |
typing.Union[str, bytes, bytearray] |
parse_float |
|
parse_int |
|
parse_constant |
|
infer_missing |
|
kw |
|
from_transport()
def from_transport(
s: str,
):
new_builder()
Creates a SerializationSettings.Builder
that copies the existing serialization settings parameters and
allows for customization.
schema()
def schema(
infer_missing: bool,
only,
exclude,
many: bool,
context,
load_only,
dump_only,
partial: bool,
unknown,
):
Parameter |
Type |
infer_missing |
bool |
only |
|
exclude |
|
many |
bool |
context |
|
load_only |
|
dump_only |
|
partial |
bool |
unknown |
|
should_fast_serialize()
def should_fast_serialize()
Whether or not the serialization settings specify that entities should be serialized for fast registration.
to_dict()
def to_dict(
encode_json,
):
Parameter |
Type |
encode_json |
|
to_json()
def to_json(
skipkeys: bool,
ensure_ascii: bool,
check_circular: bool,
allow_nan: bool,
indent: typing.Union[int, str, NoneType],
separators: typing.Tuple[str, str],
default: typing.Callable,
sort_keys: bool,
kw,
):
Parameter |
Type |
skipkeys |
bool |
ensure_ascii |
bool |
check_circular |
bool |
allow_nan |
bool |
indent |
typing.Union[int, str, NoneType] |
separators |
typing.Tuple[str, str] |
default |
typing.Callable |
sort_keys |
bool |
kw |
|
venv_root_from_interpreter()
def venv_root_from_interpreter(
interpreter_path: str,
):
Computes the path of the virtual environment root, based on the passed-in python interpreter path.
For example, /opt/venv/bin/python3 -> /opt/venv.
Parameter |
Type |
interpreter_path |
str |
with_serialized_context()
def with_serialized_context()
Use this method to create a new SerializationSettings that has an environment variable set with the serialized context.
This is useful for transporting the SerializedContext to serialized and registered tasks.
The setting will be available in the env
field under the key SERIALIZED_CONTEXT_ENV_VAR.
Returns a newly constructed SerializationSettings, or self if the serialized context is already set.
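Continuing the construction sketch shown earlier (assuming the settings object from that sketch):
settings_with_ctx = settings.with_serialized_context()
# settings_with_ctx.env now carries the serialized context under the SERIALIZED_CONTEXT_ENV_VAR key.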
Properties
Property |
Type |
Description |
entrypoint_settings |
|
|
serialized_context |
|
|
def SignalCondition(
signal_id: str,
type: flytekit.models.types.LiteralType,
output_variable_name: str,
):
Represents a dependency on a signal from a user.
Parameter |
Type |
signal_id |
str |
type |
flytekit.models.types.LiteralType |
output_variable_name |
str |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object: flyteidl.core.workflow_pb2.SignalCondition,
):
Parameter |
Type |
pb2_object |
flyteidl.core.workflow_pb2.SignalCondition |
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
is_empty |
|
|
output_variable_name |
|
|
signal_id |
|
|
type |
|
|
def SleepCondition(
duration: datetime.timedelta,
):
A sleep condition.
Parameter |
Type |
duration |
datetime.timedelta |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object: flyteidl.core.workflow_pb2.SignalCondition,
):
Parameter |
Type |
pb2_object |
flyteidl.core.workflow_pb2.SignalCondition |
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
duration |
|
|
is_empty |
|
|
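SignalCondition and SleepCondition are typically produced by the user-facing gate helpers (wait_for_input, sleep, and approve). A minimal sketch, in which the task, signal name, and timeout are assumptions:
from datetime import timedelta
from flytekit import task, workflow, wait_for_input

@task
def publish(title: str) -> str:
    return f"published: {title}"

@workflow
def review_wf() -> str:
    # wait_for_input creates a gate node backed by a SignalCondition named "title",
    # expecting a str to be supplied within one hour.
    title = wait_for_input("title", timeout=timedelta(hours=1), expected_type=str)
    return publish(title=title)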
Link to source code used to define this task or workflow.
def SourceCode(
link: typing.Optional[str],
):
Parameter |
Type |
link |
typing.Optional[str] |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object: flyteidl.admin.description_entity_pb2.SourceCode,
):
Parameter |
Type |
pb2_object |
flyteidl.admin.description_entity_pb2.SourceCode |
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
is_empty |
|
|
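A hedged round-trip sketch (the URL is a placeholder):
from flytekit.models.documentation import SourceCode

sc = SourceCode(link="https://github.com/example/repo/blob/main/wf.py")
pb = sc.to_flyte_idl()  # flyteidl.admin.description_entity_pb2.SourceCode
assert SourceCode.from_flyte_idl(pb).link == sc.link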
def TaskNodeOverrides(
resources: typing.Optional[flytekit.models.task.Resources],
extended_resources: typing.Optional[flyteidl.core.tasks_pb2.ExtendedResources],
container_image: typing.Optional[str],
pod_template: typing.Optional[flytekit.core.pod_template.PodTemplate],
):
Parameter |
Type |
resources |
typing.Optional[flytekit.models.task.Resources] |
extended_resources |
typing.Optional[flyteidl.core.tasks_pb2.ExtendedResources] |
container_image |
typing.Optional[str] |
pod_template |
typing.Optional[flytekit.core.pod_template.PodTemplate] |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object,
):
Parameter |
Type |
pb2_object |
|
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
container_image |
|
|
extended_resources |
|
|
is_empty |
|
|
pod_template |
|
|
resources |
|
|
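In user code these overrides are usually produced via with_overrides() on a node; a minimal sketch in which the task, resource values, and image are assumptions:
from flytekit import Resources, task, workflow

@task
def heavy(x: int) -> int:
    return x * 2

@workflow
def wf(x: int) -> int:
    # with_overrides attaches TaskNodeOverrides (resources, container image, ...) to this node.
    return heavy(x=x).with_overrides(
        requests=Resources(cpu="2", mem="4Gi"),
        container_image="ghcr.io/example/heavy:latest",
    )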
def TaskSpec(
template: flytekit.models.task.TaskTemplate,
docs: typing.Optional[flytekit.models.documentation.Documentation],
):
Parameter |
Type |
template |
flytekit.models.task.TaskTemplate |
docs |
typing.Optional[flytekit.models.documentation.Documentation] |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object,
):
Parameter |
Type |
pb2_object |
|
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
docs |
|
|
is_empty |
|
|
template |
|
|
def TaskTemplate(
id,
type,
metadata,
interface,
custom,
container,
task_type_version,
security_context,
config,
k8s_pod,
sql,
extended_resources,
):
A task template represents the full set of information necessary to perform a unit of work in the Flyte system.
It contains the metadata about what inputs and outputs are consumed or produced. It also contains the metadata
necessary for Flyte Propeller to do the appropriate work.
Parameter |
Type |
id |
|
type |
|
metadata |
|
interface |
|
custom |
|
container |
|
task_type_version |
|
security_context |
|
config |
|
k8s_pod |
|
sql |
|
extended_resources |
|
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object,
):
Parameter |
Type |
pb2_object |
|
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
config |
|
|
container |
|
|
custom |
|
|
extended_resources |
|
|
id |
|
|
interface |
|
|
is_empty |
|
|
k8s_pod |
|
|
metadata |
|
|
security_context |
|
|
sql |
|
|
task_type_version |
|
|
type |
|
|
def WorkflowBase(
name: str,
workflow_metadata: WorkflowMetadata,
workflow_metadata_defaults: WorkflowMetadataDefaults,
python_interface: Interface,
on_failure: Optional[Union[WorkflowBase, Task]],
docs: Optional[Documentation],
default_options: Optional[Options],
kwargs,
):
Parameter |
Type |
name |
str |
workflow_metadata |
WorkflowMetadata |
workflow_metadata_defaults |
WorkflowMetadataDefaults |
python_interface |
Interface |
on_failure |
Optional[Union[WorkflowBase, Task]] |
docs |
Optional[Documentation] |
default_options |
Optional[Options] |
kwargs |
**kwargs |
Methods
compile()
Parameter |
Type |
kwargs |
**kwargs |
def construct_node_metadata()
execute()
Parameter |
Type |
kwargs |
**kwargs |
local_execute()
def local_execute(
ctx: FlyteContext,
kwargs,
):
Parameter |
Type |
ctx |
FlyteContext |
kwargs |
**kwargs |
local_execution_mode()
def local_execution_mode()
Properties
Property |
Type |
Description |
default_options |
|
|
docs |
|
|
failure_node |
|
|
interface |
|
|
name |
|
|
nodes |
|
|
on_failure |
|
|
output_bindings |
|
|
python_interface |
|
|
short_name |
|
|
workflow_metadata |
|
|
workflow_metadata_defaults |
|
|
def WorkflowSpec(
template: flytekit.models.core.workflow.WorkflowTemplate,
sub_workflows: typing.List[flytekit.models.core.workflow.WorkflowTemplate],
docs: typing.Optional[flytekit.models.documentation.Documentation],
):
This object fully encapsulates the specification of a workflow.
Parameter |
Type |
template |
flytekit.models.core.workflow.WorkflowTemplate |
sub_workflows |
typing.List[flytekit.models.core.workflow.WorkflowTemplate] |
docs |
typing.Optional[flytekit.models.documentation.Documentation] |
Methods
from_flyte_idl()
def from_flyte_idl(
pb2_object,
):
Parameter |
Type |
pb2_object |
|
serialize_to_string()
def serialize_to_string()
short_string()
to_flyte_idl()
verbose_string()
Properties
Property |
Type |
Description |
docs |
|
|
is_empty |
|
|
sub_workflows |
|
|
template |
|
|