init
commit 95dd6da242

8  .idea/.gitignore  (generated, vendored, new file)
@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
BIN  rsc/performance_comparison.jpg  (new file, binary file not shown, 226 KiB)
BIN  rsc/results.jpg  (new file, binary file not shown, 438 KiB)
BIN  rsc/structure.jpg  (new file, binary file not shown, 470 KiB)
291  src/main.tex  (new file)
@@ -0,0 +1,291 @@
\documentclass[sigconf]{acmart}
\usepackage{amsmath}
\usepackage{mathtools}

\usepackage[inline]{enumitem}

\settopmatter{printacmref=false} % Removes citation information below abstract
\renewcommand\footnotetextcopyrightpermission[1]{} % removes footnote with conference information in first column
\pagestyle{plain} % removes running headers

%%
%% \BibTeX command to typeset BibTeX logo in the docs
\AtBeginDocument{%
  \providecommand\BibTeX{{%
    \normalfont B\kern-0.5em{\scshape i\kern-0.25em b}\kern-0.8em\TeX}}}

\acmConference{Cross-Model Pseudo-Labeling}{2023}{Linz}

%%
%% end of the preamble, start of the body of the document source.
\begin{document}

%%
%% The "title" command has an optional parameter,
%% allowing the author to define a "short title" to be used in page headers.
\title{Minimizing the Labeling Effort of Binary Classification Tasks with Active Learning}

%%
%% The "author" command and its associated commands are used to define
%% the authors and their affiliations.
%% Of note is the shared affiliation of the first two authors, and the
%% "authornote" and "authornotemark" commands
%% used to denote shared contribution to the research.
\author{Lukas Heiligenbrunner}
\email{k12104785@students.jku.at}
\affiliation{%
  \institution{Johannes Kepler University Linz}
  \city{Linz}
  \state{Upper Austria}
  \country{Austria}
  \postcode{4020}
}

%%
%% By default, the full list of authors will be used in the page
%% headers. Often, this list is too long, and will overlap
%% other information printed in the page headers. This command allows
%% the author to define a more concise list
%% of authors' names for this purpose.
\renewcommand{\shortauthors}{Lukas Heiligenbrunner}

%%
%% The abstract is a short summary of the work to be presented in the
%% article.
\begin{abstract}
    Active learning might result in faster model convergence, and thus fewer labeled samples would be required. This method might be beneficial in areas where labeling datasets is demanding and reducing computational effort is not the main objective.
\end{abstract}

%%
%% Keywords. The author(s) should pick words that accurately describe
%% the work being presented. Separate the keywords with commas.
\keywords{neural networks, ResNet, pseudo-labeling, active-learning}

%\received{20 February 2007}
%\received[revised]{12 March 2009}
%\received[accepted]{5 June 2009}

%%
%% This command processes the author and affiliation and title
%% information and builds the first part of the formatted document.
\maketitle

\section{Introduction}\label{sec:introduction}
\subsection{Motivation}
Most supervised learning tasks require a large number of training samples.
With too little training data the model will not generalize well and will not fit a real-world task.
Labeling datasets is commonly seen as an expensive task that should be avoided as much as possible.
This is the motivation for the machine-learning field called active learning.
The general approach is to train a model that, in every iteration, predicts a ranking metric or pseudo-labels, which can then be used to rank the unlabeled samples by how important they are to label.

The goal of this practical work is to test active learning within a simple classification task and evaluate its performance.
\subsection{Research Questions}
\subsubsection{Does Active-Learning benefit the learning process?}

Should active learning be used for classification tasks to improve learning performance?
Furthermore, how does the sample-selection process impact the learning?
\subsubsection{Are Dagster and Label-Studio proper tooling to build an AL loop?}
Is combining Dagster with Label-Studio a good match for building scalable and reliable active-learning loops?
\subsubsection{Does balancing the learning samples improve performance?}
The sample-selection metric might select samples from only one class by chance.
Does balancing this distribution improve model performance?
\subsection{Outline}
\section{Material and Methods}
\subsection{Material}
\subsubsection{Dagster}
\subsubsection{Label-Studio}
\subsubsection{PyTorch}
\subsection{Methods}
\subsubsection{Active-Learning}
\subsubsection{ROC}
\subsubsection{ResNet}

\section{Implementation}
The model is defined as $g(\pmb{x};\pmb{w})$, where $\pmb{w}$ are the model weights and $\pmb{x}$ the input samples.
We define two hyperparameters, the batch size $\mathcal{B}$ and the sample size $\mathcal{S}$, where $\mathcal{B} < \mathcal{S}$.
In every active learning loop iteration we draw $\mathcal{S}$ random samples from our total unlabeled sample set, forming $\mathcal{X}_S \subset \mathcal{X}_U \subset \mathcal{X}$, and evaluate the model on them:
\begin{equation}
    z = g(\mathcal{X}_S;\pmb{w})
\end{equation}
To get a class distribution summing up to one we apply a softmax to the resulting values.
\begin{equation}
    \sigma(\mathbf{z})_j = \frac{e^{z_j}}{\sum_{k=1}^K e^{z_k}} \quad \text{for } j \in \{0,1\}\label{eq:equation}
\end{equation}
The per-sample selection score is then computed either as the distance of the class probability from the decision boundary or as the maximum class confidence:
\begin{align}
    S(z) = | 0.5 - \sigma(\mathbf{z})_0| \; \textit{or} \; \arg\max_j \sigma(\mathbf{z})
\end{align}
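
To make the selection step concrete, a minimal PyTorch-style sketch of one loop iteration could look as follows; the names \texttt{select\_batch}, \texttt{model}, \texttt{unlabeled\_pool}, \texttt{sample\_size} and \texttt{batch\_size} are illustrative assumptions and not taken from the actual implementation:
\begin{verbatim}
import random
import torch
import torch.nn.functional as F

def select_batch(model, unlabeled_pool, sample_size, batch_size):
    """Pick the batch_size most uncertain samples out of a random subset."""
    subset = random.sample(unlabeled_pool, sample_size)  # X_S, random subset of X_U
    xs = torch.stack(subset)                             # assumes tensor samples
    with torch.no_grad():
        probs = F.softmax(model(xs), dim=1)              # sigma(z)
    scores = (0.5 - probs[:, 0]).abs()                   # S(z): distance to the boundary
    picked = torch.argsort(scores)[:batch_size]          # smallest score = most uncertain
    return [subset[i] for i in picked.tolist()]
\end{verbatim}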

\cite{activelearning}

\section{Semi-Supervised learning}\label{sec:semi-supervised-learning}
In traditional supervised learning we have a labeled dataset.
Each datapoint is associated with a corresponding target label.
The goal is to fit a model that predicts the labels from the datapoints.

In traditional unsupervised learning there are also datapoints, but no labels are known.
The goal is to find patterns or structures in the data.
It can be used, for example, for clustering or dimensionality reduction.

Those two techniques combined yield semi-supervised learning.
Some of the labels are known, but for most of the data we have only the raw datapoints.
The basic idea is that the unlabeled data can significantly improve model performance when used in combination with the labeled data.

\section{FixMatch}\label{sec:fixmatch}
There is an already existing approach called FixMatch.
It was introduced in a Google Research paper from 2020~\cite{fixmatch}.
The key idea of FixMatch is to leverage the unlabeled data by predicting pseudo-labels based on the known labels.
Both the known labels and the predicted ones are then used side by side to train the model.
The labeled samples guide the learning process, while the unlabeled samples contribute additional information.

Not every pseudo-prediction is kept for further training of the model.
A confidence threshold is defined to evaluate how ``confident'' the model is about its prediction.
The prediction is dropped if the model is not confident enough.
The quantity and quality of the obtained labels are crucial and have a significant impact on the overall accuracy.
This means improving the pseudo-label framework as much as possible is essential.

FixMatch comes with some major limitations.
It relies on a single model for generating pseudo-labels, which can introduce errors and uncertainty in the labels.
Incorrect pseudo-labels may affect the learning process negatively.
Furthermore, FixMatch uses a comparably small model for label prediction, which has a limited capacity.
This can negatively affect the learning process as well.
%There is no measure defined how certain the model is about its prediction.
%Such a measure improves overall performance by filtering noisy and unsure predictions.
Cross-Model Pseudo-Labeling tries to address all of those limitations.

\subsection{Math of FixMatch}\label{subsec:math-of-fixmatch}
Equation~\ref{eq:fixmatch} defines the loss-function that trains the model.
The sum over the unlabeled batch size $B_u$ takes the average loss of this batch and should be familiar.
The input data is augmented in two different ways.
First there is a weak augmentation $\mathcal{T}_{\text{weak}}(\cdot)$ which only applies basic transformations such as filtering and blurring.
Moreover, there is the strong augmentation $\mathcal{T}_{\text{strong}}(\cdot)$ which applies cutouts and random augmentations.

\begin{equation}
    \label{eq:fixmatch}
    \mathcal{L}_u = \frac{1}{B_u} \sum_{i=1}^{B_u} \mathbf{1}(\max(p_i) \geq \tau) \mathcal{H}(\hat{y}_i,F(\mathcal{T}_{\text{strong}}(u_i)))
\end{equation}

The indicator function $\mathbf{1}(\cdot)$ applies a principle called ``confidence-based masking''.
It retains a label only if its largest probability is above a threshold $\tau$.
Here $p_i \coloneqq F(\mathcal{T}_{\text{weak}}(u_i))$ is the model evaluation of a weakly augmented input.

\begin{equation}
    \label{eq:crossentropy}
    \mathcal{H}(\hat{y}_i, y_i) = -\sum_{k=1}^{K} y_{i,k} \cdot \log(\hat{y}_{i,k})
\end{equation}

The second part $\mathcal{H}(\cdot, \cdot)$ is a standard cross-entropy loss function which takes two inputs, the predicted and the true label:
$\hat{y}_i$, the obtained pseudo-label, and $F(\mathcal{T}_{\text{strong}}(u_i))$, the model evaluation of a strongly augmented input.
The indicator function evaluates to $0$ if the pseudo-prediction is not confident enough, and the current loss contribution is dropped.
Otherwise it evaluates to $1$, the sample is kept and it trains the model further.
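
As an illustration, the masked unlabeled loss of Equation~\ref{eq:fixmatch} could be written as the following PyTorch-style sketch; the helper name \texttt{fixmatch\_unlabeled\_loss}, the augmentation callables and the default threshold are assumptions for illustration, not the reference implementation:
\begin{verbatim}
import torch
import torch.nn.functional as F

def fixmatch_unlabeled_loss(model, u_batch, weak_aug, strong_aug, tau=0.95):
    """Confidence-masked pseudo-label loss, cf. Eq. (eq:fixmatch)."""
    with torch.no_grad():
        p = F.softmax(model(weak_aug(u_batch)), dim=1)  # p_i = F(T_weak(u_i))
        conf, pseudo = p.max(dim=1)                     # max(p_i) and pseudo-label y_hat_i
        mask = (conf >= tau).float()                    # indicator 1(max(p_i) >= tau)
    ce = F.cross_entropy(model(strong_aug(u_batch)), pseudo, reduction="none")
    return (mask * ce).mean()                           # average over the batch B_u
\end{verbatim}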

\section{Cross-Model Pseudo-Labeling}\label{sec:cross-model-pseudo-labeling}
The newly introduced approach of this paper is called Cross-Model Pseudo-Labeling (CMPL)~\cite{Xu_2022_CVPR}.
Figure~\ref{fig:cmpl-structure} visualizes the structure of CMPL\@.
Two different models are defined, a smaller auxiliary model and a larger main model.
They provide pseudo-labels for each other.
The two different models have a different structural bias which leads to complementary representations.
This symmetric design yields a boost in performance.
The SG label means `Stop Gradient'.
The loss evaluations of each model are fed into the opposite model as its loss.
Thus the two models train each other.

\begin{figure}[h]
    \centering
    \includegraphics[width=\linewidth]{../rsc/structure}
    \caption{Architecture of Cross-Model Pseudo-Labeling}
    \label{fig:cmpl-structure}
\end{figure}

\subsection{Math of CMPL}\label{subsec:math}
The loss function of CMPL is similar to the one explained above.
However, we have to distinguish between the supervised loss, computed on the samples where the labels are known, and the unsupervised loss, where no labels are available.

Equations~\ref{eq:cmpl-losses1} and~\ref{eq:cmpl-losses2} are standard cross-entropy loss functions computed with the supervised labels for the two separate models.

\begin{align}
    \label{eq:cmpl-losses1}
    \mathcal{L}_s^F &= \frac{1}{B_l} \sum_{i=1}^{B_l} \mathcal{H}(y_i,F(\mathcal{T}^F_{\text{standard}}(v_i)))\\
    \label{eq:cmpl-losses2}
    \mathcal{L}_s^A &= \frac{1}{B_l} \sum_{i=1}^{B_l} \mathcal{H}(y_i,A(\mathcal{T}^A_{\text{standard}}(v_i)))
\end{align}

Equations~\ref{eq:cmpl-loss3} and~\ref{eq:cmpl-loss4} are the unsupervised losses.
They are very similar to FixMatch, but it is important to note that the confidence-based masking is applied by the opposite corresponding model.

\begin{align}
    \label{eq:cmpl-loss3}
    \mathcal{L}_u^F &= \frac{1}{B_u} \sum_{i=1}^{B_u} \mathbf{1}(\max(p_i^A) \geq \tau) \mathcal{H}(\hat{y}_i^A,F(\mathcal{T}_{\text{strong}}(u_i)))\\
    \label{eq:cmpl-loss4}
    \mathcal{L}_u^A &= \frac{1}{B_u} \sum_{i=1}^{B_u} \mathbf{1}(\max(p_i^F) \geq \tau) \mathcal{H}(\hat{y}_i^F,A(\mathcal{T}_{\text{strong}}(u_i)))
\end{align}

Finally, to train the main objective, an overall loss is calculated by simply summing all the losses.
The unsupervised part is weighted by a hyperparameter $\lambda$ to balance it against the supervised loss.

\begin{equation}
    \label{eq:loss-main-obj}
    \mathcal{L} = (\mathcal{L}_s^F + \mathcal{L}_s^A) + \lambda(\mathcal{L}_u^F + \mathcal{L}_u^A)
\end{equation}
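
To illustrate how Equations~\ref{eq:cmpl-loss3} to~\ref{eq:loss-main-obj} fit together, the combined objective could be assembled as in the following sketch; all function and parameter names are assumptions made for illustration and not the authors' implementation:
\begin{verbatim}
import torch
import torch.nn.functional as F

def cross_model_unlabeled_loss(teacher, student, u_batch, weak_aug, strong_aug, tau=0.95):
    """Eqs. (cmpl-loss3/4): pseudo-labels and mask come from the other model."""
    with torch.no_grad():                                 # 'SG': stop gradient on the teacher
        p = F.softmax(teacher(weak_aug(u_batch)), dim=1)
        conf, pseudo = p.max(dim=1)
        mask = (conf >= tau).float()
    ce = F.cross_entropy(student(strong_aug(u_batch)), pseudo, reduction="none")
    return (mask * ce).mean()

def cmpl_total_loss(main_model, aux_model, labeled_x, labels,
                    u_batch, std_aug, weak_aug, strong_aug, lam=1.0, tau=0.95):
    """Eq. (loss-main-obj): supervised losses plus cross-model unsupervised losses."""
    loss_s_f = F.cross_entropy(main_model(std_aug(labeled_x)), labels)   # L_s^F
    loss_s_a = F.cross_entropy(aux_model(std_aug(labeled_x)), labels)    # L_s^A
    loss_u_f = cross_model_unlabeled_loss(aux_model, main_model,
                                          u_batch, weak_aug, strong_aug, tau)  # L_u^F
    loss_u_a = cross_model_unlabeled_loss(main_model, aux_model,
                                          u_batch, weak_aug, strong_aug, tau)  # L_u^A
    return (loss_s_f + loss_s_a) + lam * (loss_u_f + loss_u_a)
\end{verbatim}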

\section{Architecture}\label{sec:Architecture}
The model architectures used depend highly on the task to be performed.
In this case the task is video action recognition.
A 3D-ResNet50 was chosen for the main model and a smaller 3D-ResNet18 for the auxiliary model.

\section{Performance}\label{sec:performance}

Figure~\ref{fig:results} shows a performance comparison between training only on the supervised samples and several different pseudo-label frameworks.
One can clearly see that the performance gain with the new CMPL framework is quite significant.
For evaluation the Kinetics-400 and UCF-101 datasets are used.
As backbone models a 3D-ResNet18 and a 3D-ResNet50 are used.
Even when only 1\% of the true labels of the UCF-101 dataset are known, 25.1\% of the labels could be predicted correctly.

\begin{figure}[h]
    \centering
    \includegraphics[width=\linewidth]{../rsc/results}
    \caption{Performance comparisons between CMPL, FixMatch and supervised learning only}
    \label{fig:results}
\end{figure}

\section{Further schemes}\label{sec:further-schemes}
How the pseudo-labels are generated may impact the overall performance.
In this paper the pseudo-labels are obtained by the cross-model approach.
But there might be other strategies as well.
For example:
\begin{enumerate*}
    \item Self-First: Each network uses just its own prediction if it is confident enough.
    If not, it uses the prediction of its sibling network.
    \item Opposite-First: Each network prioritizes the prediction of the sibling network.
    \item Maximum: The most confident prediction is leveraged.
    \item Average: The two predictions are averaged before deriving the pseudo-label
\end{enumerate*}.
A sketch of these selection rules is given below.

Those are just other approaches one can keep in mind.
This does not mean they are better; in fact, they performed even worse in this study.
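
The four rules above could be expressed roughly as in the following sketch; the function name \texttt{combine\_pseudo\_labels} and its inputs are illustrative assumptions and not the implementation used in the paper:
\begin{verbatim}
import torch

def combine_pseudo_labels(p_own, p_sibling, tau=0.95, scheme="opposite_first"):
    """Alternative pseudo-label selection schemes for two sibling models.

    p_own, p_sibling: softmax outputs of shape (batch, classes)."""
    conf_own, label_own = p_own.max(dim=1)
    conf_sib, label_sib = p_sibling.max(dim=1)

    if scheme == "self_first":      # own prediction if confident, else the sibling's
        return torch.where(conf_own >= tau, label_own, label_sib)
    if scheme == "opposite_first":  # prefer the sibling's prediction
        return torch.where(conf_sib >= tau, label_sib, label_own)
    if scheme == "maximum":         # whichever model is more confident
        return torch.where(conf_own >= conf_sib, label_own, label_sib)
    if scheme == "average":         # average the distributions, then take the argmax
        return ((p_own + p_sibling) / 2).argmax(dim=1)
    raise ValueError(f"unknown scheme {scheme!r}")
\end{verbatim}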

\section{Conclusion}\label{sec:conclusion}
In conclusion, Cross-Model Pseudo-Labeling demonstrates the potential to significantly advance the field of semi-supervised action recognition.
Across several experiments, Cross-Model Pseudo-Labeling outperforms the supervised-only approach by a multiple.
It surpasses most of the other existing pseudo-labeling frameworks.
Through the integration of main and auxiliary models, consistency regularization, and uncertainty estimation, CMPL offers a powerful framework for leveraging unlabeled data and improving model performance.
It paves the way for more accurate and efficient action recognition systems.

%%
%% The next two lines define the bibliography style to be used, and
%% the bibliography file.
\bibliographystyle{ACM-Reference-Format}
\bibliography{sources}

%%
%% If your work has an appendix, this is the place to put it.
\appendix

% appendix

\end{document}
\endinput
39  src/sources.bib  (new file)
@@ -0,0 +1,39 @@
%! Author = lukas
%! Date = 4/9/24

@InProceedings{Xu_2022_CVPR,
    author    = {Xu, Yinghao and Wei, Fangyun and Sun, Xiao and Yang, Ceyuan and Shen, Yujun and Dai, Bo and Zhou, Bolei and Lin, Stephen},
    title     = {Cross-Model Pseudo-Labeling for Semi-Supervised Action Recognition},
    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
    month     = {June},
    year      = {2022},
    pages     = {2959-2968}
}

@online{fixmatch,
    author   = {Kihyuk Sohn and David Berthelot and Chun-Liang Li},
    title    = {FixMatch: Simplifying Semi-Supervised Learning with Consistency and Confidence},
    url      = {https://arxiv.org/abs/2001.07685},
    addendum = {(accessed: 20.03.2023)},
    keywords = {FixMatch, semi-supervised}
}

@InProceedings{activelearning,
    author    = {Faria, Bruno and Perdig{\~a}o, Dylan and Br{\'a}s, Joana and Macedo, Luis},
    editor    = {Marreiros, Goreti and Martins, Bruno and Paiva, Ana and Ribeiro, Bernardete and Sardinha, Alberto},
    title     = {The Joint Role of Batch Size and Query Strategy in Active Learning-Based Prediction - A Case Study in the Heart Attack Domain},
    booktitle = {Progress in Artificial Intelligence},
    year      = {2022},
    publisher = {Springer International Publishing},
    address   = {Cham},
    pages     = {464--475},
    abstract  = {This paper proposes an Active Learning algorithm that could detect heart attacks based on different body measures, which requires much less data than the passive learning counterpart while maintaining similar accuracy. To that end, different parameters were tested, namely the batch size and the query strategy used. The initial tests on batch size consisted of varying its value until 50. From these experiments, the conclusion was that the best results were obtained with lower values, which led to the second set of experiments, varying the batch size between 1 and 5 to understand in which value the accuracy was higher. Four query strategies were tested: random sampling, least confident sampling, margin sampling and entropy sampling. The results of each approach were similar, reducing by 57{\%} to 60{\%} the amount of data required to obtain the same results of the passive learning approach.},
    isbn      = {978-3-031-16474-3}
}