// Execute the given lambda function in parallel with the threading backend in TVM.
// The loop `for (int i = 0; i < 10; i++) { a[i] = i; }` behaves the same as:
//   parallel_for_with_threading_backend([&a](int i) { a[i] = i; }, 0, 10);
#ifndef TVM_RUNTIME_THREADING_BACKEND_H_
#define TVM_RUNTIME_THREADING_BACKEND_H_

#include <algorithm>
#include <cstdint>   // uint64_t, used by the cpu_set_t fallback below
#include <cstring>   // memset/memcmp, used by CPU_ZERO/CPU_EQUAL below
#include <functional>
#include <memory>
#include <vector>

#if defined(__linux__) || defined(__ANDROID__)
#if defined(__ANDROID__)
// Older Android NDKs do not expose the glibc cpu_set_t API. Provide a
// minimal bit-set based replacement so CPU-affinity code compiles there.
#ifndef CPU_SET
#define CPU_SETSIZE 1024
#define __NCPUBITS (8 * sizeof(uint64_t))
typedef struct {
  uint64_t __bits[CPU_SETSIZE / __NCPUBITS];
} cpu_set_t;

// Set bit `cpu` in the set.
#define CPU_SET(cpu, cpusetp) \
  ((cpusetp)->__bits[(cpu) / __NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))
// Clear every bit in the set.
#define CPU_ZERO(cpusetp) memset((cpusetp), 0, sizeof(cpu_set_t))
// Test whether bit `cpu` is set.
#define CPU_ISSET(cpu, cpusetp) \
  (1UL << ((cpu) % __NCPUBITS)) == \
      ((cpusetp)->__bits[(cpu) / __NCPUBITS] & (1UL << ((cpu) % __NCPUBITS)))
// Compare two sets byte-wise.
#define CPU_EQUAL(left, right) (memcmp(&left, &right, sizeof(cpu_set_t)) == 0)
#endif
#endif
#endif
namespace runtime {
namespace threading {
class ThreadGroup {
public:
class Impl;
TVM_DLL
ThreadGroup(
int num_workers, std::function<
void(
int)> worker_callback,
bool exclude_worker0 = false);
};
std::vector<unsigned int> cpus = {});
private:
Impl* impl_;
};
std::vector<unsigned int> cpus);
}
template <typename T>
namespace detail {
template <typename T>
struct ParallelForWithThreadingBackendLambdaInvoker {
T* lambda_ptr = static_cast<T*>(cdata);
(*lambda_ptr)(task_id, num_task);
return 0;
}
};
template <typename T>
inline void parallel_launch_with_threading_backend(T flambda) {
void* cdata = &flambda;
cdata, 0);
}
}
template <typename T>
if (end - begin == 1) {
flambda(begin);
return;
}
auto flaunch = [begin, end, flambda](int task_id, int num_task) {
int64_t total_len = end - begin;
int64_t step = (total_len + num_task - 1) / num_task;
int64_t local_begin =
std::min(begin + step * task_id, end);
int64_t local_end =
std::min(local_begin + step, end);
for (int64_t i = local_begin; i < local_end; ++i) {
flambda(i);
}
};
detail::parallel_launch_with_threading_backend(flaunch);
}
}
}
#endif
/* Generated-documentation cross-reference residue (not code; kept for reference):
int TVMBackendParallelLaunch(FTVMParallelLambda flambda, void *cdata, int num_task)
  Backend function for running parallel jobs.
void Join()
  Blocks until all non-main threads in the pool finish.
AffinityMode — Definition: threading_backend.h:88
  @ kBig — Definition: threading_backend.h:89
  @ kLittle — Definition: threading_backend.h:90
  @ kSpecifyOneCorePerThread — Definition: threading_backend.h:92
  @ kSpecifyThreadShareAllCore — Definition: threading_backend.h:94
int Configure(AffinityMode mode, int nthreads, bool exclude_worker0, std::vector< unsigned int > cpus={})
  Configure the CPU id affinity.
ThreadGroup(int num_workers, std::function< void(int)> worker_callback, bool exclude_worker0=false)
  Creates a collection of threads which run a provided function.
void Configure(tvm::runtime::threading::ThreadGroup::AffinityMode mode, int nthreads, std::vector< unsigned int > cpus)
  Configuring the CPU affinity mode for the working threads.
int32_t NumThreads()
  Get the number of threads being used by the TVM runtime.
void ResetThreadPool()
  Reset the threads in the pool. All current threads are destroyed and new ones are created.
void Yield()
  Platform-agnostic no-op.
void SetMaxConcurrency(int value)
  Setting the maximum number of available cores.
void parallel_for_with_threading_backend(T flambda, int64_t begin, int64_t end)
  Definition: threading_backend.h:205
runtime implementation for LibTorch/TorchScript. — Definition: analyzer.h:36
PrimExpr min(PrimExpr a, PrimExpr b, Span span=Span())
  Take minimum of two values.
TVMParallelGroupEnv — Environment for TVM parallel task. — Definition: c_backend_api.h:119
int32_t num_task — total amount of tasks — Definition: c_backend_api.h:125
*/