Last modifications

This commit is contained in:
Fabrice Mouhartem 2018-06-20 17:10:06 +02:00
parent 1ef78ca088
commit c2acf57040
4 changed files with 209 additions and 49 deletions

View File

@ -431,7 +431,7 @@ the equality
\mathbf{A}_{\tau^{(i^\dagger)}} \cdot \left[ \begin{array}{c}
\mathbf{v}_1^\star \\ \hline \mathbf{v}_2^\star
\end{array} \right]
&=& \mathbf{u} + \mathbf{D} \cdot \bit ( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
&=& \mathbf{u} + \mathbf{D} \cdot \textsf{bin}( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
+ \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } ) \quad \bmod q.
\end{eqnarray}
@ -442,13 +442,13 @@ such that
\mathbf{v}_1 \\ \hline \mathbf{v}_2
\end{array} \right] = \mathbf{u} + \mathbf{D} \cdot \textsf{bin}( \mathbf{c}_M ) \bmod q. \end{eqnarray}
Relation (\ref{sim-s}) implies that $ \mathbf{c}_M \neq \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
+ \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } \bmod q$ by hypothesis. It follows that $\textsf{bin}(\mathbf{c}_M) - \bit ( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
+ \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } \bmod q$ by hypothesis. It follows that $\textsf{bin}(\mathbf{c}_M) - \textsf{bin}( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
+ \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } ) $ is a non-zero vector in $\{-1,0,1\}^m$. Subtracting (\ref{second-sol}) from (\ref{first-sol}), we get
\begin{eqnarray*}
\mathbf{A}_{\tau^{(i^\dagger)}} \cdot \left[\begin{array}{c}
\mathbf{v}_1^\star - \mathbf{v}_1 \\ \hline \mathbf{v}_2^\star - \mathbf{v}_2
\end{array} \right]
&=& \mathbf{D} \cdot \bigl( \textsf{bin}(\mathbf{c}_M) - \bit ( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
&=& \mathbf{D} \cdot \bigl( \textsf{bin}(\mathbf{c}_M) - \textsf{bin}( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
+ \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } ) \bigr) \mod q,
\end{eqnarray*}
which implies
@ -456,7 +456,7 @@ which implies
\left[
\begin{array}{c|c} \mathbf{D} \cdot \mathbf{S} ~ &~ \mathbf{D} \cdot (\mathbf{S}_0 +
\sum_{j=1}^\ell \tau^{(i^\dagger)}[j] \cdot \mathbf{S}_j)
\end{array} \right] \cdot \left[ \begin{array}{c} {\mathbf{v}_1^\star -\mathbf{v}_1 } \\ \hline {\mathbf{v}_2^\star - \mathbf{v}_2 } \end{array} \right] \\ = \mathbf{D} \cdot \bigl( \textsf{bin}(\mathbf{c}_M) - \bit ( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
\end{array} \right] \cdot \left[ \begin{array}{c} {\mathbf{v}_1^\star -\mathbf{v}_1 } \\ \hline {\mathbf{v}_2^\star - \mathbf{v}_2 } \end{array} \right] \\ = \mathbf{D} \cdot \bigl( \textsf{bin}(\mathbf{c}_M) - \textsf{bin}( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
+ \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } ) \bigr) \mod q .
\end{multline}
The above implies that the vector
@ -466,7 +466,7 @@ which implies
\nonumber && \hspace{2.75cm} ~+ \bit \big( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} } + \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } \big) - \textsf{bin}(\mathbf{c}_M)
\end{eqnarray}
is a short integer vector of $\Lambda_q^{\perp}(\mathbf{D})$. Indeed, its norm can be bounded as $\| \mathbf{w} \| \leq \beta'' = \sqrt{2} (\ell+2) \sigma^2 m^{3/2} + m^{1/2} $. We argue that it is non-zero with overwhelming probability. We already observed that
$ \bit ( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
$ \textsf{bin}( \mathbf{D}_0 \cdot {\mathbf{s}^{\star} }
+ \sum_{k=1}^N \mathbf{D}_k \cdot {\mathfrak{m}_k^{\star} } ) - \textsf{bin}(\mathbf{c}_M)$ is a non-zero vector of $\{-1,0,1\}^m$, which rules out the event that
$({\mathbf{v}_1^\star}, {\mathbf{v}_2^\star} ) =({\mathbf{v}_1} , {\mathbf{v}_2}) $. Hence, we can only have $\mathbf{w}=\mathbf{0}^m$ when the equality
\begin{multline} \label{final-eq}
@ -716,7 +716,7 @@ probabilities during hybrid games where the two distributions are not close in t
Then, it computes $\mathbf{c}_M = \mathbf{D}_0 \cdot \mathbf{s}_0 \in \ZZ_q^{2n}$ for a short Gaussian vector
$\mathbf{s}_0 \sample D_{\ZZ^{2m},\sigma_0}$, which will be used in the $i^\dagger$-th query.
Next, it samples short vectors $\mathbf{v}_1,\mathbf{v}_2 \sample D_{\ZZ^m,\sigma}$ to define
$$\mathbf{u}= \mathbf{A}_{\tau^{(i^\dagger)}} \cdot \begin{bmatrix} \mathbf{v}_1 \\ \mathbf{v}_2 \end{bmatrix} - \mathbf{D} \cdot \bit (\mathbf{c}_M) ~ \in \ZZ_q^n.$$
$$\mathbf{u}= \mathbf{A}_{\tau^{(i^\dagger)}} \cdot \begin{bmatrix} \mathbf{v}_1 \\ \mathbf{v}_2 \end{bmatrix} - \mathbf{D} \cdot \textsf{bin}(\mathbf{c}_M) ~ \in \ZZ_q^n.$$
In addition, $\bdv$ picks extra small-norm matrices $\mathbf{R}_1,\ldots,\mathbf{R}_N \in \ZZ^{2m \times 2m}$ whose columns are sampled from $D_{\ZZ^m,\sigma}$, which
are used to define randomizations of $\mathbf{D}_0$ by computing $\mathbf{D}_k = \mathbf{D}_0 \cdot \mathbf{R}_k$ for each $k \in \{1,\ldots,N\}$.
The adversary is given public parameters $\mathsf{par}\coloneqq \{\mathbf{B},\mathbf{G}_0,\mathbf{G}_1,CK\}$, where $CK=\{\mathbf{D}_k\}_{k=0}^N$, and the public key $PK\coloneqq \big( \mathbf{A}, \{\mathbf{A}_j\}_{j=0}^\ell, \mathbf{D},\mathbf{u} \big)$.
@ -1070,18 +1070,18 @@ The scheme is secure against misidentification attacks under the $\SIS_{n,2m,q,\
The case $coin=1$ corresponds to $\bdv$'s expectation that the knowledge extractor will obtain the identifier $ \mathsf{id}^\star = \mathsf{id}^\dagger$ of a group member in
$ U^a$ (i.e., a group member that was legitimately introduced at the $i^\star$-th $\mathcal{Q}_{\ajoin}$-query, for some $i^\star \in \{1,\ldots,Q_a\}$, where the identifier
$\mathsf{id}^\dagger$ is used by $\mathcal{Q}_{\ajoin}$),
but $\bit ( \mathbf{v}^\star ) \in \{0,1\}^{2m}$ (which is encrypted in in $\mathbf{c}_{\mathbf{v}_i}^\star$ as part of the forgery $\Sigma^\star$) and the extracted $\mathbf{s}^\star \in \ZZ^{2m}$ are such that $ \bit \bigl( \mathbf{D}_0 \cdot \bit ( \mathbf{v}^\star ) + \mathbf{D}_1 \cdot \mathbf{s}^\star \bigr) \in \{0,1\}^m $
but $\textsf{bin}( \mathbf{v}^\star ) \in \{0,1\}^{2m}$ (which is encrypted in $\mathbf{c}_{\mathbf{v}_i}^\star$ as part of the forgery $\Sigma^\star$) and the extracted $\mathbf{s}^\star \in \ZZ^{2m}$ are such that $ \bit \bigl( \mathbf{D}_0 \cdot \textsf{bin}( \mathbf{v}^\star ) + \mathbf{D}_1 \cdot \mathbf{s}^\star \bigr) \in \{0,1\}^m $
does not match
the string $ \bit \bigl( \mathbf{D}_0 \cdot \bit ( \mathbf{v}_{i^\star} ) + \mathbf{D}_1 \cdot \mathbf{s}_{i^\star} \bigr) \in \{0,1\}^{2m} $ for which
the string $ \bit \bigl( \mathbf{D}_0 \cdot \textsf{bin}( \mathbf{v}_{i^\star} ) + \mathbf{D}_1 \cdot \mathbf{s}_{i^\star} \bigr) \in \{0,1\}^{2m} $ for which
user $i^\star$ obtained a membership certificate at the $i^\star$-th $\mathcal{Q}_{\ajoin}$-query. When $coin=1$, the choice of $i^\star$ corresponds to a guess that the knowledge
extractor will reveal an $\ell$-bit identifier that coincides with the identifier $\mathsf{id}^\dagger$ assigned to the user introduced at the $i^\star$-th $\mathcal{Q}_{\ajoin}$-query.
The last case $coin=2$ corresponds to $\bdv$'s expectation that decrypting $\mathbf{c}_{\mathbf{v}_i}^\star$ (which is part of $\Sigma^\star$) and running
the knowledge extractor on $\adv$ will uncover vectors $\bit ( \mathbf{v}^\star ) \in \{0,1\}^{2m}$, $\mathbf{w}^\star \in \{0,1\}^m$ and $\mathbf{s}^\star \in \ZZ^{2m}$
the knowledge extractor on $\adv$ will uncover vectors $\textsf{bin}( \mathbf{v}^\star ) \in \{0,1\}^{2m}$, $\mathbf{w}^\star \in \{0,1\}^m$ and $\mathbf{s}^\star \in \ZZ^{2m}$
such that $\mathbf{w}^\star= \textsf{bin}(\mathbf{D}_0 \cdot \textsf{bin}(\mathbf{v}^\star) + \mathbf{D}_1 \cdot \mathbf{s}^\star )$ and
\begin{eqnarray} \label{collide}
\bit \bigl( \mathbf{D}_0 \cdot \bit ( \mathbf{v}^\star ) + \mathbf{D}_1 \cdot \mathbf{s}^\star \bigr) = \bit \bigl( \mathbf{D}_0 \cdot \bit ( \mathbf{v}_{i^\star} ) + \mathbf{D}_1 \cdot \mathbf{s}_{i^\star} \bigr)
\bit \bigl( \mathbf{D}_0 \cdot \textsf{bin}( \mathbf{v}^\star ) + \mathbf{D}_1 \cdot \mathbf{s}^\star \bigr) = \bit \bigl( \mathbf{D}_0 \cdot \textsf{bin}( \mathbf{v}_{i^\star} ) + \mathbf{D}_1 \cdot \mathbf{s}_{i^\star} \bigr)
\end{eqnarray}
but $(\bit ( \mathbf{v}^\star ), \mathbf{s}^\star) \neq ( \bit ( \mathbf{v}_{i^\star} ), \mathbf{s}_{i^\star} ) $, where $ \mathbf{v}_{i^\star} \in \Zq^{4n}$ and $\mathbf{s}_{i^\star} \in \ZZ^{2m}$ are the vectors
but $(\textsf{bin}( \mathbf{v}^\star ), \mathbf{s}^\star) \neq ( \textsf{bin}( \mathbf{v}_{i^\star} ), \mathbf{s}_{i^\star} ) $, where $ \mathbf{v}_{i^\star} \in \Zq^{4n}$ and $\mathbf{s}_{i^\star} \in \ZZ^{2m}$ are the vectors
involved in the $i^\star$-th $\mathcal{Q}_{\ajoin}$-query.
\\
\indent

View File

@ -38,6 +38,7 @@ In this section, we first present the general principles and basic tools to hand
\end{definition}
\begin{definition}[Proof of knowledge \cite{GMR85,BG92}]
\label{de:pok}
\index{Zero Knowledge!Proof of knowledge}
Let $\kappa$ be a function from $\bit^\star$ to $[0,1]$. A complete interactive proof system $(P,V)$ is said to be a \textit{proof of knowledge} for the relation $R$ with knowledge error $\kappa$ if it satisfies the knowledge soundness property.
\begin{description}
@ -54,6 +55,7 @@ Another useful property that a proof system can have in the context of privacy-p
This property states that if a proof system has multiple witnesses, it is impossible to tell apart which one has been used during the proof.
\begin{definition}[Witness indistinguishable proofs~\cite{FS90}]
\label{de:wi} \index{Zero Knowledge!Witness Indistinguishability}
Let $(P,V)$ be a complete interactive proof system for relation $R$. It is said to be \textit{witness indistinguishable} if, for every $\ppt$ algorithm $\hat{V}$ and every two sequences $\{w_x\}_{(x, w_x) \in R}$, $\{w'_x\}_{(x,w'_x) \in R}$, the following ensembles are computationally indistinguishable:
\begin{align*}
\{ \trans(P(x, w_x), \hat{V}(x)) \}_x && \mbox{and} && \{ \trans( P(x, w'_x), \hat{V}(x)) \}_x.

View File

@ -5,74 +5,123 @@
In this thesis, we presented new cryptographic schemes that rely on lattice or pairing assumptions.
These contributions focus on the design and analysis of new cryptographic schemes that target privacy-preserving applications.
In pairing-based cryptography, we proposed a practical dynamic group signature scheme, for which security is well-understood.
It relies on broadly used assumptions with simple and constant-size descriptions which exist for more than ten years.
In pairing-based cryptography, we proposed a practical dynamic group signature scheme, whose security relies on well-understood assumptions in the random oracle.
It relies on widely used assumptions with simple and constant-size descriptions which have been studied for more than ten years.
This work is also supported by an implementation in \texttt{C}.
The results in the lattice setting give rise to three fundamental schemes that were missing in the landscape of lattice-based privacy-preserving cryptography.
Even if these schemes suffer from some efficiency issues due to their novelty, we do believe that they are one step towards a quantum-secure privacy-friendly world.
The results in the lattice setting give rise to three realizations of fundamental primitives that were missing in the landscape of lattice-based privacy-preserving cryptography.
Even if these schemes suffer from a lack of efficiency due to their novelty, we do believe that they take one step towards a quantum-secure privacy-friendly world.
On the road, improvements have been made in the state of the art of zero-knowledge proofs in the lattice setting by providing building blocks that, we believe, are of independent interest.
As of our signature with efficient protocols, it has already been used to design a lattice-based e-cash system~\cite{LLNW17}.
For example, our signature with efficient protocols has already been used to design a privacy-preserving lattice-based e-cash system~\cite{LLNW17}.
All these works are proven secure in strong security models under simple assumptions.
This provides a breeding ground for new theoretical constructions.
\section*{Open Problems}
The path of providing new cryptographic primitives and proving them is disseminated with pitfalls.
The most obvious questions that stem from this work are about how to tackle the trade-offs we made in the design of those primitives.
The path of providing new cryptographic primitives and proving them secure is full of pitfalls.
The most obvious questions that stem from this work are how to tackle the trade-offs we made in the design of those primitives.
\begin{question}
Is it possible to build a fully-simulatable adaptive oblivious transfer with access control secure under $\LWE$ with polynomially large modulus?
Is it possible to build a fully-simulatable adaptive oblivious transfer (even without access control) secure under $\LWE$ with polynomially large modulus?
\end{question}
In other words, is it possible to avoid the use of smudging to guarantee message-privacy in the adaptive oblivious transfer scheme of~\cref{ch:ot-lwe}.
As is, this issue arises from the use of Regev's encryption scheme, which does not guarantee this index privacy.
However, while finer analysis on GSW ciphertexts~\cite{GSW13} seems promising to achieve this at reasonable cost~\cite{BDPMW16}, they do not suffice in our setting because they wold leak the norm of the noise vector of ciphertexts.
Then, the main difficulty is to have zero-knowledge proofs compatible with the access control and the encryption layers.
In other words, is it possible to avoid the use of noise flooding to guarantee receiver-security in the adaptive oblivious transfer scheme of~\cref{ch:ot-lwe}?
In our current protocol, this issue arises from the use of Regev's encryption scheme, where we need to prevent the noise distribution from leaking the receiver's index.
However, while a finer analysis on GSW ciphertexts~\cite{GSW13} seems promising to achieve this at reasonable cost~\cite{BDPMW16}, it is not sufficient in our setting because it would leak the norm of the noise vector of ciphertexts.
Then, the main difficulty is to have zero-knowledge proofs compatible with the access control and the encryption components.
\begin{question}
Can we construct provably-secure adaptive oblivious transfer schemes in the universal composability model?
\end{question}
Our adaptive oblivious transfer scheme relies on zero-knowledge proofs to hedge against malicious adversaries.
The security proofs take advantage of the fact that the proofs can be rewound to extract a witness (as described in~\cref{de:pok}).
The Peikert-Vaikuntanathan-Waters~\cite{PVW08} construction, based on dual-mode encryption, achieves $1$-out-of-$2$ composable oblivious transfer (which can be generalized to $1$-out-of-$2^t$ OT), without relying on zero-knowledge proofs, but it does not imply OT with adaptive queries (i.e., where each index $\rho_i$ may depend on previous transfers $\rho_1, \ldots, \rho_{i-1}$).
Actually, the use of $\ZK$ proofs is not impossible in this setting, as shown by the pairing-based construction of Green and Hohenberger~\cite{GH08}.
However, this protocol uses the trapdoor extractability of Groth-Sahai proofs~\cite{GS08} to achieve straight-line extraction. Such straight-line extraction is not known to be possible in the lattice setting.
\begin{question}
Can we obtain a more efficient compact e-cash system from lattice assumptions?
\end{question}
Another privacy-preserving primitive is compact e-cash~\cite{Cha82,Cha83,CHL05a}. As explained in the introduction, it is the digital equivalent of real-life money.
A body of research followed its introduction~\cite{CFN88,OO91,CP92,FY93,Oka95,Tsi97}, and the first compact realization was given by Camenisch, Hohenberger and Lysyanskaya~\cite{CHL05a} (``compact'' means that the complexity of coin transfers is at most logarithmic in the value of withdrawn wallets).
Before the work of Libert, Ling, Nguyen and Wang~\cite{LLNW17}, all compact constructions were based on discrete-logarithm-based techniques.
This construction still suffers from efficiency issues akin to the problem we met in this thesis.
It is thus interesting to improve the efficiency of this scheme and obtain viable constructions of anonymous e-cash from post-quantum assumptions.
\subsection*{Zero-Knowledge Proofs}
\begin{question}
Can we provide NIZK proofs in the standard model for all $\NP$ languages relying on standard $\LWE$ assumption only?
Can we provide NIZK proofs in the standard model for all $\NP$ languages while relying on the standard $\LWE$ assumption only?
\end{question}
Extending the work of Groth, Ostrovsky and Sahai~\cite{GOS06} in the lattice setting would be a great improvement for lattice-based privacy-preserving cryptography.
This question remains open for more than $10$ years~\cite{KW18}.
Recent line of work makes steps forward in this direction~\cite{RSS18}, but rely on primitives that do not exist yet ($\NIZK$ proofs for a variant of the bounded decoding distance problem).
Extending the work of Groth, Ostrovsky and Sahai~\cite{GOS06} to the lattice setting would be a breakthrough result for lattice-based cryptography in general.
This question has remained open for more than $10$ years~\cite{PV08}.
A recent line of work makes steps forward in this direction~\cite{KW18,RSS18}, but relies on primitives that do not exist yet~\cite{RSS18} ($\NIZK$ proofs for a variant of the bounded decoding distance problem) or assumes pre-processing~\cite{KW18}.
The Stern-like proof system we work on in during this thesis, despite being flexible enough to prove a large variety of statements, suffers from the stiffness of being combinatorial.
The choice of permutations used to ensure zero-knowledgeness (and thus witness-indistinguishability) is quite strict, and force the challenge space to be ternary.
This proves to be a real bottleneck in the efficiency of such proof systems.
The Stern-like proof systems we studied in this thesis, despite being flexible enough to prove a large variety of statements, suffer from the stiffness of being combinatorial.
The choice of permutations used to ensure the zero-knowledge property (and thus witness-indistinguishability) is quite strict, and forces the challenge space to be ternary.
This turns out to be a real bottleneck in the efficiency of such proof systems.
\begin{question}
Is it possible to construct zero-knowledge protocols for average-case problems that take advantage of the geometry of lattices?
%Is it possible to construct zero-knowledge protocols for average-case problems that take advantage of the geometry of lattices?
Can we get negligible soundness error in one shot for expressive statements in the post-quantum setting?
\end{question}
As explained in~\cref{ch:zka}, nowadays lattice-based proof systems for $\SIS$/$\LWE$ rely either on the additional structure lying in special families of lattices, or on the combinatorial nature of representations of lattices in terms of matrices.
If the natural structure of a lattice is a group, additive noise or witness-length restrictions forbid the use of standard group-based cryptography to undertake this problem.
However, lattices naturally carry a strong geometrical structure, as exploited in~\cite{MV03,PV08} to construct (interactive and non-interactive) zero-knowledge proofs for some worst-case lattice problems.
It may be an interesting question to see if the restricted geometry of average-case lattice problems can be exploited to provide such proofs.
%If these proof systems can be used after applying a transformation from average-case to worst-case problem, this methodology is highly inefficient and does not close the question.
This question can be restated as ``can we combine the expressivity of Stern-like proofs with the efficiency of Schnorr-like proofs with rejection sampling?''.
For Stern-like proofs, decreasing the soundness error from $2/3$ to $1/2$ would already be an interesting improvement with a direct impact on all lattice-based schemes presented in this thesis.
Recall that \textit{soundness error} is the probability that a cheating prover convinces an honest verifier of a false statement. As long as it is noticeably different from $1$, it is possible to make the soundness negligible by repeating the protocol.
Likewise, isogeny-based proof systems~\cite{JDF11,GPS17} suffer from similar issues as the challenge space is small (binary).
The $2/3$ soundness error is also present in~\cite{IKOS07},
which is a technique to obtain zero-knowledge proofs relying on secure multi-party computation.
With this technique, however, the size of the proof is proportional to the size of the circuit describing the relation we want to prove (which is not the case with Stern-like protocols).
On the other hand, the soundness error of one round of the protocol is at most $2/3$.
Thus, the question of having efficient post-quantum zero-knowledge proofs for expressive statements is a difficult question and remains open as of today.
As we explained in the introduction, advanced cryptography from lattices often suffers from the use of lattice trapdoors.
Thus, a natural question may be:
%If these proof systems can be used after applying a transformation from average-case to worst-case problem, this methodology is highly inefficient and does not close the question.
\subsection*{Cryptographic Constructions}
\begin{question}
Does an efficient trapdoor-free \textsf{(H)IBE} exists?
Can we construct more efficient lattice-based signature schemes compatible with zero-knowledge proofs?
\end{question}
For instance, in the group encryption scheme of~\cref{ch:ge-lwe}, trapdoors are used in two places.
To have a secure public key encryption scheme under adaptive chosen-ciphertext attacks and for the signature scheme.
Both these primitives are induced by identity-based encryption: the Canetti-Halevi-Katz transform generically turns an \textsf{IBE} into a \textsf{IND-CCA2} \PKE~\cite{CHK04}, and signatures are directly implied from \textsf{IND-CPA-}secure \textsf{IBE}~\cite{BF01,BLS01}.
%Actually, even the question of having a trapdoorless \textsf{IND-CCA2} public key encryption scheme still remains an open question.
Actually, a recent construction from Brakerski, Lombardi, Segev and Vaikuntanathan~\cite{BLSV18} gives a candidate which relies on garble circuits, and is fairly inefficient compared to \textsf{IBE}s with trapdoors.
Even the question of an \textsf{IND-CCA2} public key encryption still does not have a satisfactory response.
The construction of Peikert and Waters~\cite{PW08} is indeed trapdoor-free, but is still less efficient than trapdoor-based ones.
In the general lattice setting, the most efficient signature schemes require at least as many matrices as the length $\ell$ of the random tag used in the signature (like the scheme in~\cref{se:gs-lwe-sigep}).
This cost has direct impact on the efficiency and public-key size of schemes or protocols that use them, like in our group signatures of~\cref{ch:gs-lwe}, where $\ell$ is logarithmic in the maximal number of members the group can accept $\Ngs$.
In ideal lattices, it is possible to reduce this cost to a vector of size $\ell$~\cite{DM14}.
In the group signature scheme of~\cite{LNWX18}, which is based on ideal lattice problems, the authors use this property to allow an exponential number of group members to join the group, and thus propose a ``constant-size'' group signature scheme.
The method used to construct this group signature is essentially the same as in \cref{ch:gs-lwe}, where matrices are hidden in the ring structure of the ideal lattice~\cite{LS14}.
Hence, the dependency on $\log \Ngs$ is actually hidden in the dimension of the ring.
As these signatures are a fundamental building block for privacy-preserving cryptography, any improvement on them has a direct impact on the primitives that use them as a building block.
\begin{question}
Can we obtain more efficient lattice-based one-time signatures in general lattices?
\end{question}
In our group signature and group encryption schemes (in \cref{ch:gs-lwe} and \cref{ch:ge-lwe} respectively), the signature and the ciphertext contain a public key for a one-time signature scheme.
One efficiency issue is that, in lattice-based one-time signatures~\cite{LM08,Moh11}, the public key contains a full matrix, which is part of the signature/ciphertext.
Therefore, this matrix significantly increases the size of the signature/ciphertext.
As the security requirements for one-time signatures are weaker than those for full-fledged signatures (namely, the adversary has access to only one signature per public key), we can hope for constructions of one-time signatures based on general lattices where the public key is smaller than a full matrix.
As we explained in the introduction, advanced cryptography from lattices often suffers from the use of lattice trapdoors.
Thus, a natural question may be:
\begin{question}
Does an efficient trapdoor-free \textsf{(H)IBE} exist?
\end{question}
In the group encryption scheme of~\cref{ch:ge-lwe}, for instance, trapdoors are used for two distinct purposes.
They are used to build a secure public-key encryption scheme under adaptive chosen-ciphertext attacks and a signature scheme.
These primitives are both induced by identity-based encryption: the Canetti-Halevi-Katz transform generically turns an \textsf{IBE} into a \textsf{IND-CCA2} \PKE~\cite{CHK04}, and signatures are directly implied from \textsf{IND-CPA-}secure \textsf{IBE}~\cite{BF01,BLS01}.
%Actually, even the question of having a trapdoorless \textsf{IND-CCA2} public key encryption scheme still remains an open question.
Actually, a recent construction from Brakerski, Lombardi, Segev and Vaikuntanathan~\cite{BLSV18} (inspired by~\cite{DG17a}) gives a candidate which relies on garbled circuits, and is fairly inefficient compared to \textsf{IBE} schemes with trapdoors.
Even the question of a trapdoor-less \textsf{IND-CCA2} public key encryption still does not have a satisfactory response.
The construction of Peikert and Waters~\cite{PW08} is trapdoor-free, but remains very expensive.
\begin{comment}
\begin{question}
Can we achieve better security proofs for cryptographic schemes?
\end{question}
@ -82,3 +131,5 @@ As explained in~\cref{ch:proofs}, it is important to rely on simple assumptions
Given recent advances in cryptographic proofs~\cite{Hof16,KMP16,Hof17}, it is now possible to attain stronger security notions than what was claimed before~\cite{DSYC18}.
Another line of work targets the quality of the reduction, aiming for \textit{tight security}~\cite{GHKW16,AHN+17,LJYP14,LPJY15,LSSS17}.
This improves the understanding of the links between cryptographic schemes and hardness assumptions, leading to more reliable constructions.
\end{comment}

113
these.bib
View File

@ -322,7 +322,7 @@
@InProceedings{CHL05,
author = {Camenisch, Jan and Hohenberger, Susan and Lysyanskaya, Anna},
title = {Balancing Accountability and Privacy Using E-Cash},
title = {{Balancing Accountability and Privacy Using E-Cash}},
booktitle = {{SCN}},
year = {2005},
number = {4116},
@ -506,7 +506,7 @@
@InProceedings{CP92,
author = {Chaum, D. and Pedersen, T.},
title = {Transferred Cash Grows in Size},
title = {{Transferred Cash Grows in Size}},
booktitle = {{Eurocrypt}},
year = {1992},
volume = {658},
@ -1743,7 +1743,7 @@
@InProceedings{GH08,
author = {Matthew Green and Susan Hohenberger},
title = {Universally Composable Adaptive Oblivious Transfer},
title = {{Universally Composable Adaptive Oblivious Transfer}},
booktitle = {{Asiacrypt}},
year = {2008},
number = {5350},
@ -3129,4 +3129,111 @@
publisher = {ACM},
}
@InProceedings{LNWX18,
author = {Ling, San and Nguyen, Khoa and Wang, Huaxiong and Xu, Yanhong},
title = {{Constant-Size Group Signatures from Lattices}},
booktitle = {PKC},
year = {2018},
series = {LNCS},
pages = {58--88},
publisher = {Springer},
abstract = {Lattice-based group signature is an active research topic in recent years. Since the pioneering work by Gordon, Katz and Vaikuntanathan (Asiacrypt 2010), ten other schemes have been proposed, providing various improvements in terms of security, efficiency and functionality. However, in all known constructions, one has to fix the number N of group users in the setup stage, and as a consequence, the signature sizes are dependent on N.},
isbn = {978-3-319-76581-5},
}
@InProceedings{DM14,
author = {Ducas, L{\'e}o and Micciancio, Daniele},
title = {{Improved Short Lattice Signatures in the Standard Model}},
booktitle = {Crypto},
year = {2014},
series = {LNCS},
pages = {335--352},
publisher = {Springer},
abstract = {We present a signature scheme provably secure in the standard model (no random oracles) based on the worst-case complexity of approximating the Shortest Vector Problem in ideal lattices within polynomial factors. The distinguishing feature of our scheme is that it achieves short signatures (consisting of a single lattice vector), and relatively short public keys (consisting of O(logn) vectors.) Previous lattice schemes in the standard model with similarly short signatures, due to Boyen (PKC 2010) and Micciancio and Peikert (Eurocrypt 2012), had substantially longer public keys consisting of $\Omega$(n) vectors (even when implemented with ideal lattices).},
isbn = {978-3-662-44371-2},
}
@InProceedings{Moh11,
author = {Mohassel, Payman},
title = {{One-Time Signatures and Chameleon Hash Functions}},
booktitle = {SAC},
year = {2011},
series = {LNCS},
pages = {302--319},
publisher = {Springer},
abstract = {In this work we show a general construction for transforming any chameleon hash function to a strongly unforgeable one-time signature scheme. Combined with the result of [Bellare and Ristov, PKC 2007], this also implies a general construction of strongly unforgeable one-time signatures from $\Sigma$-protocols in the standard model.},
isbn = {978-3-642-19574-7},
}
@InProceedings{LM08,
author = {Lyubashevsky, Vadim and Micciancio, Daniele},
title = {{Asymptotically Efficient Lattice-Based Digital Signatures}},
booktitle = {TCC},
year = {2008},
series = {LNCS},
pages = {37--54},
publisher = {Springer},
abstract = {We give a direct construction of digital signatures based on the complexity of approximating the shortest vector in ideal (e.g., cyclic) lattices. The construction is provably secure based on the worst-case hardness of approximating the shortest vector in such lattices within a polynomial factor, and it is also asymptotically efficient: the time complexity of the signing and verification algorithms, as well as key and signature size is almost linear (up to poly-logarithmic factors) in the dimension n of the underlying lattice. Since no sub-exponential (in n) time algorithm is known to solve lattice problems in the worst case, even when restricted to cyclic lattices, our construction gives a digital signature scheme with an essentially optimal performance/security trade-off.},
isbn = {978-3-540-78524-8},
}
@Article{LS14,
author = {Langlois, Adeline and Stehl{\'e}, Damien},
title = {{Worst-case to average-case reductions for module lattices}},
journal = {Designs, Codes and Cryptography},
year = {2014},
}
@InProceedings{DG17a,
author = {D{\"o}ttling, Nico and Garg, Sanjam},
title = {{From Selective IBE to Full IBE and Selective HIBE}},
booktitle = {TCC},
year = {2017},
series = {LNCS},
pages = {372--408},
publisher = {Springer},
abstract = {Starting with any selectively secure identity-based encryption (IBE) scheme, we give generic constructions of fully secure IBE and selectively secure hierarchical IBE (HIBE) schemes. Our HIBE scheme allows for delegation arbitrarily many times.},
isbn = {978-3-319-70500-2},
}
@InProceedings{IKOS07,
author = {Ishai, Yuval and Kushilevitz, Eyal and Ostrovsky, Rafail and Sahai, Amit},
title = {{Zero-knowledge from Secure Multiparty Computation}},
booktitle = {STOC},
year = {2007},
pages = {21--30},
publisher = {ACM},
acmid = {1250794},
doi = {10.1145/1250790.1250794},
isbn = {978-1-59593-631-8},
keywords = {black-box reductions, cryptography, secure computation, zero-knowledge},
location = {San Diego, California, USA},
numpages = {10},
url = {http://doi.acm.org/10.1145/1250790.1250794},
}
@InProceedings{JDF11,
author = {Jao, David and De Feo, Luca},
title = {{Towards Quantum-Resistant Cryptosystems from Supersingular Elliptic Curve Isogenies}},
booktitle = {PQCrypto},
year = {2011},
series = {LNCS},
pages = {19--34},
publisher = {Springer},
abstract = {We present new candidates for quantum-resistant public-key cryptosystems based on the conjectured difficulty of finding isogenies between supersingular elliptic curves. The main technical idea in our scheme is that we transmit the images of torsion bases under the isogeny in order to allow the two parties to arrive at a common shared key despite the noncommutativity of the endomorphism ring. Our work is motivated by the recent development of a subexponential-time quantum algorithm for constructing isogenies between ordinary elliptic curves. In the supersingular case, by contrast, the fastest known quantum attack remains exponential, since the noncommutativity of the endomorphism ring means that the approach used in the ordinary case does not apply. We give a precise formulation of the necessary computational assumption along with a discussion of its validity. In addition, we present implementation results showing that our protocols are multiple orders of magnitude faster than previous isogeny-based cryptosystems over ordinary curves.},
isbn = {978-3-642-25405-5},
}
@InProceedings{GPS17,
author = {Galbraith, Steven D. and Petit, Christophe and Silva, Javier},
title = {{Identification Protocols and Signature Schemes Based on Supersingular Isogeny Problems}},
booktitle = {Asiacrypt},
year = {2017},
series = {LNCS},
pages = {3--33},
publisher = {Springer},
abstract = {We provide a new identification protocol and new signature schemes based on isogeny problems. Our identification protocol relies on the hardness of the endomorphism ring computation problem, arguably the hardest of all problems in this area, whereas the only previous scheme based on isogenies (due to De Feo, Jao and Pl{\^u}t) relied on potentially easier problems. The protocol makes novel use of an algorithm of Kohel-Lauter-Petit-Tignol for the quaternion version of the {\$}{\$}{\backslash}ell {\$}{\$} -isogeny problem, for which we provide a more complete description and analysis. Our new signature schemes are derived from the identification protocols using the Fiat-Shamir (respectively, Unruh) transforms for classical (respectively, post-quantum) security. We study their efficiency, highlighting very small key sizes and reasonably efficient signing and verification algorithms.},
isbn = {978-3-319-70694-8},
}
@Comment{jabref-meta: databaseType:bibtex;}